
attachment.go

// Copyright 2017 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package repo

import (
	contexExt "context"
	"encoding/json"
	"errors"
	"fmt"
	"mime/multipart"
	"net/http"
	"path"
	"strconv"
	"strings"

	"code.gitea.io/gitea/models"
	"code.gitea.io/gitea/modules/base"
	"code.gitea.io/gitea/modules/context"
	"code.gitea.io/gitea/modules/labelmsg"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/minio_ext"
	"code.gitea.io/gitea/modules/notification"
	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/storage"
	"code.gitea.io/gitea/modules/upload"
	"code.gitea.io/gitea/modules/worker"

	gouuid "github.com/satori/go.uuid"
)

const (
	// result of decompress
	DecompressSuccess = "0"
	DecompressFailed  = "1"

	tplAttachmentUpload base.TplName = "repo/attachment/upload"
)
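// CloudBrainDataset is the JSON item shape returned by the dataset query
// handlers below (QueryAllPublicDataset/QueryPrivateDataset); the field names
// ("id", "name", "place", "provider", "created_at") follow what the CloudBrain
// consumer appears to expect.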
type CloudBrainDataset struct {
	UUID       string `json:"id"`
	Name       string `json:"name"`
	Path       string `json:"place"`
	UserName   string `json:"provider"`
	CreateTime string `json:"created_at"`
}
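// UploadForm lists the form fields used by the chunked-upload endpoints
// (uploadId, uuid, size, offset, chunkNumber, file); the names mirror the
// query/form parameters read by the handlers in this file.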
type UploadForm struct {
	UploadID   string         `form:"uploadId"`
	UuID       string         `form:"uuid"`
	PartSize   int64          `form:"size"`
	Offset     int64          `form:"offset"`
	PartNumber int            `form:"chunkNumber"`
	PartFile   multipart.File `form:"file"`
}

func RenderAttachmentSettings(ctx *context.Context) {
	renderAttachmentSettings(ctx)
}

func renderAttachmentSettings(ctx *context.Context) {
	ctx.Data["IsAttachmentEnabled"] = setting.Attachment.Enabled
	ctx.Data["AttachmentStoreType"] = setting.Attachment.StoreType
	ctx.Data["AttachmentAllowedTypes"] = setting.Attachment.AllowedTypes
	ctx.Data["AttachmentMaxSize"] = setting.Attachment.MaxSize
	ctx.Data["AttachmentMaxFiles"] = setting.Attachment.MaxFiles
}

func UploadAttachmentUI(ctx *context.Context) {
	ctx.Data["datasetId"] = ctx.Query("datasetId")
	ctx.HTML(200, tplAttachmentUpload)
}
// UploadAttachment response for uploading issue's attachment
func UploadAttachment(ctx *context.Context) {
	if !setting.Attachment.Enabled {
		ctx.Error(404, "attachment is not enabled")
		return
	}
	file, header, err := ctx.Req.FormFile("file")
	if err != nil {
		ctx.Error(500, fmt.Sprintf("FormFile: %v", err))
		return
	}
	defer file.Close()
	buf := make([]byte, 1024)
	n, _ := file.Read(buf)
	if n > 0 {
		buf = buf[:n]
	}
	err = upload.VerifyAllowedContentType(buf, strings.Split(setting.Attachment.AllowedTypes, ","))
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}
	datasetID, _ := strconv.ParseInt(ctx.Req.FormValue("dataset_id"), 10, 64)
	attach, err := models.NewAttachment(&models.Attachment{
		IsPrivate:  true,
		UploaderID: ctx.User.ID,
		Name:       header.Filename,
		DatasetID:  datasetID,
	}, buf, file)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("NewAttachment: %v", err))
		return
	}
	log.Trace("New attachment uploaded: %s", attach.UUID)
	ctx.JSON(200, map[string]string{
		"uuid": attach.UUID,
	})
}

func UpdatePublicAttachment(ctx *context.Context) {
	file := ctx.Query("file")
	isPrivate, _ := strconv.ParseBool(ctx.Query("is_private"))
	attach, err := models.GetAttachmentByUUID(file)
	if err != nil {
		ctx.Error(404, err.Error())
		return
	}
	attach.IsPrivate = isPrivate
	models.UpdateAttachment(attach)
}

// DeleteAttachment response for deleting issue's attachment
func DeleteAttachment(ctx *context.Context) {
	file := ctx.Query("file")
	attach, err := models.GetAttachmentByUUID(file)
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}
	// issue 214: mod del-dataset permission
	if !models.CanDelAttachment(ctx.IsSigned, ctx.User, attach) {
		ctx.Error(403)
		return
	}
	err = models.DeleteAttachment(attach, true)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("DeleteAttachment: %v", err))
		return
	}
	attachjson, _ := json.Marshal(attach)
	labelmsg.SendDeleteAttachToLabelSys(string(attachjson))
	DeleteAllUnzipFile(attach, "")
	_, err = models.DeleteFileChunkById(attach.UUID)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("DeleteFileChunkById: %v", err))
		return
	}
	ctx.JSON(200, map[string]string{
		"uuid": attach.UUID,
	})
}
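// DownloadUserIsOrgOrCollaboration reports whether the signed-in user may
// download the attachment because the dataset's repository is owned by an
// organization the user belongs to, or because the user is a collaborator
// on that repository.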
func DownloadUserIsOrgOrCollaboration(ctx *context.Context, attach *models.Attachment) bool {
	dataset, err := models.GetDatasetByID(attach.DatasetID)
	if err != nil {
		log.Info("query dataset error")
	} else {
		repo, err := models.GetRepositoryByID(dataset.RepoID)
		if err != nil {
			log.Info("query repo error.")
		} else {
			repo.GetOwner()
			if ctx.User != nil {
				if repo.Owner.IsOrganization() {
					if repo.Owner.IsUserPartOfOrg(ctx.User.ID) {
						log.Info("org user may visit the attach.")
						return true
					}
				}
				isCollaborator, _ := repo.IsCollaborator(ctx.User.ID)
				if isCollaborator {
					log.Info("Collaborator user may visit the attach.")
					return true
				}
			}
		}
	}
	return false
}
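// Access rules applied below: if the attachment is linked to a repository
// (directly or through its dataset) the repository/dataset permissions are
// checked; otherwise only the uploader, organization members and repository
// collaborators may fetch a private attachment.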
// GetAttachment serves attachments
func GetAttachment(ctx *context.Context) {
	typeCloudBrain := ctx.QueryInt("type")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}
	attach, err := models.GetAttachmentByUUID(ctx.Params(":uuid"))
	if err != nil {
		if models.IsErrAttachmentNotExist(err) {
			ctx.Error(404)
		} else {
			ctx.ServerError("GetAttachmentByUUID", err)
		}
		return
	}
	repository, unitType, err := attach.LinkedRepository()
	if err != nil {
		ctx.ServerError("LinkedRepository", err)
		return
	}
	dataSet, err := attach.LinkedDataSet()
	if err != nil {
		ctx.ServerError("LinkedDataSet", err)
		return
	}
	if repository == nil && dataSet != nil {
		repository, _ = models.GetRepositoryByID(dataSet.RepoID)
		unitType = models.UnitTypeDatasets
	}
	if repository == nil { // If not linked
		if !(ctx.IsSigned && attach.UploaderID == ctx.User.ID) && attach.IsPrivate && !DownloadUserIsOrgOrCollaboration(ctx, attach) { // We block if not the uploader
			ctx.Error(http.StatusNotFound)
			return
		}
	} else { // If we have the repository we check access
		perm, errPermission := models.GetUserRepoPermission(repository, ctx.User)
		if errPermission != nil {
			ctx.Error(http.StatusInternalServerError, "GetUserRepoPermission", errPermission.Error())
			return
		}
		if !perm.CanRead(unitType) {
			ctx.Error(http.StatusNotFound)
			return
		}
	}
	if dataSet != nil {
		isPermit, err := models.GetUserDataSetPermission(dataSet, ctx.User)
		if err != nil {
			ctx.Error(http.StatusInternalServerError, "GetUserDataSetPermission", err.Error())
			return
		}
		if !isPermit {
			ctx.Error(http.StatusNotFound)
			return
		}
	}
	// If we have matched and access to release or issue
	if setting.Attachment.StoreType == storage.MinioStorageType {
		url := ""
		if typeCloudBrain == models.TypeCloudBrainOne {
			url, err = storage.Attachments.PresignedGetURL(setting.Attachment.Minio.BasePath+attach.RelativePath(), attach.Name)
			if err != nil {
				ctx.ServerError("PresignedGetURL", err)
				return
			}
		} else {
			if setting.PROXYURL != "" {
				url = setting.PROXYURL + "/obs_proxy_download?uuid=" + attach.UUID + "&file_name=" + attach.Name
				log.Info("return url=" + url)
			} else {
				url, err = storage.ObsGetPreSignedUrl(attach.UUID, attach.Name)
				if err != nil {
					ctx.ServerError("ObsGetPreSignedUrl", err)
					return
				}
			}
		}
		if err = increaseDownloadCount(attach, dataSet); err != nil {
			ctx.ServerError("Update", err)
			return
		}
		if dataSet != nil {
			http.Redirect(ctx.Resp, ctx.Req.Request, url, http.StatusMovedPermanently)
		} else {
			fr, err := storage.Attachments.Open(attach.RelativePath())
			if err != nil {
				ctx.ServerError("Open", err)
				return
			}
			defer fr.Close()
			if err = ServeData(ctx, attach.Name, fr); err != nil {
				ctx.ServerError("ServeData", err)
				return
			}
		}
	} else {
		fr, err := storage.Attachments.Open(attach.RelativePath())
		if err != nil {
			ctx.ServerError("Open", err)
			return
		}
		defer fr.Close()
		if err = increaseDownloadCount(attach, dataSet); err != nil {
			ctx.ServerError("Update", err)
			return
		}
		if err = ServeData(ctx, attach.Name, fr); err != nil {
			ctx.ServerError("ServeData", err)
			return
		}
	}
}
func increaseDownloadCount(attach *models.Attachment, dataSet *models.Dataset) error {
	if err := attach.IncreaseDownloadCount(); err != nil {
		return err
	}
	if dataSet != nil {
		if err := models.IncreaseDownloadCount(dataSet.ID); err != nil {
			return err
		}
	}
	return nil
}

// GetPresignedPutObjectURL gets a presigned url for put object
func GetPresignedPutObjectURL(ctx *context.Context) {
	if !setting.Attachment.Enabled {
		ctx.Error(404, "attachment is not enabled")
		return
	}
	err := upload.VerifyFileType(ctx.Params("file_type"), strings.Split(setting.Attachment.AllowedTypes, ","))
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}
	if setting.Attachment.StoreType == storage.MinioStorageType {
		uuid := gouuid.NewV4().String()
		url, err := storage.Attachments.PresignedPutURL(models.AttachmentRelativePath(uuid))
		if err != nil {
			ctx.ServerError("PresignedPutURL", err)
			return
		}
		ctx.JSON(200, map[string]string{
			"uuid": uuid,
			"url":  url,
		})
	} else {
		ctx.Error(404, "storage type is not enabled")
		return
	}
}

// AddAttachment response for adding an attachment record
func AddAttachment(ctx *context.Context) {
	typeCloudBrain := ctx.QueryInt("type")
	fileName := ctx.Query("file_name")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}
	uuid := ctx.Query("uuid")
	has := false
	if typeCloudBrain == models.TypeCloudBrainOne {
		has, err = storage.Attachments.HasObject(models.AttachmentRelativePath(uuid))
		if err != nil {
			ctx.ServerError("HasObject", err)
			return
		}
	} else {
		has, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(uuid) + "/" + fileName)
		if err != nil {
			ctx.ServerError("ObsHasObject", err)
			return
		}
	}
	if !has {
		ctx.Error(404, "attachment has not been uploaded")
		return
	}
	datasetId := ctx.QueryInt64("dataset_id")
	dataset, err := models.GetDatasetByID(datasetId)
	if err != nil {
		ctx.Error(404, "dataset does not exist.")
		return
	}
	attachment, err := models.InsertAttachment(&models.Attachment{
		UUID:       uuid,
		UploaderID: ctx.User.ID,
		IsPrivate:  dataset.IsPrivate(),
		Name:       fileName,
		Size:       ctx.QueryInt64("size"),
		DatasetID:  ctx.QueryInt64("dataset_id"),
		Type:       typeCloudBrain,
	})
	if err != nil {
		ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
		return
	}
	if attachment.DatasetID != 0 {
		if isCanDecompress(attachment.Name) {
			if typeCloudBrain == models.TypeCloudBrainOne {
				err = worker.SendDecompressTask(contexExt.Background(), uuid, attachment.Name)
				if err != nil {
					log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
				} else {
					attachment.DecompressState = models.DecompressStateIng
					err = models.UpdateAttachment(attachment)
					if err != nil {
						log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error())
					}
				}
			}
			// todo: decompress type_two
		}
	}
	ctx.JSON(200, map[string]string{
		"result_code": "0",
	})
}

func isCanDecompress(name string) bool {
	if strings.HasSuffix(name, ".zip") || strings.HasSuffix(name, ".tar.gz") || strings.HasSuffix(name, ".tgz") {
		return true
	}
	return false
}
func UpdateAttachmentDecompressState(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	result := ctx.Query("result")
	attach, err := models.GetAttachmentByUUID(uuid)
	if err != nil {
		log.Error("GetAttachmentByUUID(%s) failed:%s", uuid, err.Error())
		return
	}
	if result == DecompressSuccess {
		attach.DecompressState = models.DecompressStateDone
	} else if result == DecompressFailed {
		attach.DecompressState = models.DecompressStateFailed
	} else {
		log.Error("result is error: %s", result)
		return
	}
	err = models.UpdateAttachment(attach)
	if err != nil {
		log.Error("UpdateAttachment(%s) failed:%s", uuid, err.Error())
		return
	}
	log.Info("start to send msg to labelsystem")
	dataset, _ := models.GetDatasetByID(attach.DatasetID)
	labelMap := make(map[string]string)
	labelMap["UUID"] = uuid
	labelMap["Type"] = fmt.Sprint(attach.Type)
	labelMap["UploaderID"] = fmt.Sprint(attach.UploaderID)
	labelMap["RepoID"] = fmt.Sprint(dataset.RepoID)
	labelMap["AttachName"] = attach.Name
	attachjson, _ := json.Marshal(labelMap)
	labelmsg.SendAddAttachToLabelSys(string(attachjson))
	log.Info("end to send msg to labelsystem")
	ctx.JSON(200, map[string]string{
		"result_code": "0",
	})
}
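// The handlers below implement the resumable, chunked upload flow used by the
// dataset uploader. A sketch of the expected call order, inferred from the
// handlers themselves:
//   1. GetSuccessChunks      - look up an existing upload by file MD5 and report which parts are already stored
//   2. NewMultipart          - create a multipart upload (MinIO or OBS) and record a FileChunk row
//   3. GetMultipartUploadUrl - return a presigned (or proxied) URL for each part
//   4. UpdateMultipart       - record the etag of every completed part
//   5. CompleteMultipart     - complete the multipart upload and insert the Attachment record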
func GetSuccessChunks(ctx *context.Context) {
	fileMD5 := ctx.Query("md5")
	typeCloudBrain := ctx.QueryInt("type")
	fileName := ctx.Query("file_name")
	var chunks string
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}
	fileChunk, err := models.GetFileChunkByMD5AndUser(fileMD5, ctx.User.ID, typeCloudBrain)
	if err != nil {
		if models.IsErrFileChunkNotExist(err) {
			ctx.JSON(200, map[string]string{
				"uuid":     "",
				"uploaded": "0",
				"uploadID": "",
				"chunks":   "",
			})
		} else {
			ctx.ServerError("GetFileChunkByMD5", err)
		}
		return
	}
	isExist := false
	if typeCloudBrain == models.TypeCloudBrainOne {
		isExist, err = storage.Attachments.HasObject(models.AttachmentRelativePath(fileChunk.UUID))
		if err != nil {
			ctx.ServerError("HasObject failed", err)
			return
		}
	} else {
		isExist, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(fileChunk.UUID) + "/" + fileName)
		if err != nil {
			ctx.ServerError("ObsHasObject failed", err)
			return
		}
	}
	if isExist {
		if fileChunk.IsUploaded == models.FileNotUploaded {
			log.Info("the file has been uploaded but not recorded")
			fileChunk.IsUploaded = models.FileUploaded
			if err = models.UpdateFileChunk(fileChunk); err != nil {
				log.Error("UpdateFileChunk failed: %v", err.Error())
			}
		}
	} else {
		if fileChunk.IsUploaded == models.FileUploaded {
			log.Info("the file has been recorded but not uploaded")
			fileChunk.IsUploaded = models.FileNotUploaded
			if err = models.UpdateFileChunk(fileChunk); err != nil {
				log.Error("UpdateFileChunk failed: %v", err.Error())
			}
		}
		if typeCloudBrain == models.TypeCloudBrainOne {
			chunks, err = storage.GetPartInfos(fileChunk.UUID, fileChunk.UploadID)
			if err != nil {
				log.Error("GetPartInfos failed:%v", err.Error())
			}
		} else {
			chunks, err = storage.GetObsPartInfos(fileChunk.UUID, fileChunk.UploadID, fileName)
			if err != nil {
				log.Error("GetObsPartInfos failed:%v", err.Error())
			}
		}
		if err != nil {
			models.DeleteFileChunk(fileChunk)
			ctx.JSON(200, map[string]string{
				"uuid":     "",
				"uploaded": "0",
				"uploadID": "",
				"chunks":   "",
			})
			return
		}
	}
	var attachID int64
	attach, err := models.GetAttachmentByUUID(fileChunk.UUID)
	if err != nil {
		if models.IsErrAttachmentNotExist(err) {
			attachID = 0
		} else {
			ctx.ServerError("GetAttachmentByUUID", err)
			return
		}
	} else {
		attachID = attach.ID
	}
	if attach == nil {
		ctx.JSON(200, map[string]string{
			"uuid":        fileChunk.UUID,
			"uploaded":    strconv.Itoa(fileChunk.IsUploaded),
			"uploadID":    fileChunk.UploadID,
			"chunks":      chunks,
			"attachID":    "0",
			"datasetID":   "0",
			"fileName":    "",
			"datasetName": "",
		})
		return
	}
	dataset, err := models.GetDatasetByID(attach.DatasetID)
	if err != nil {
		ctx.ServerError("GetDatasetByID", err)
		return
	}
	ctx.JSON(200, map[string]string{
		"uuid":        fileChunk.UUID,
		"uploaded":    strconv.Itoa(fileChunk.IsUploaded),
		"uploadID":    fileChunk.UploadID,
		"chunks":      chunks,
		"attachID":    strconv.Itoa(int(attachID)),
		"datasetID":   strconv.Itoa(int(attach.DatasetID)),
		"fileName":    attach.Name,
		"datasetName": dataset.Title,
	})
}
func NewMultipart(ctx *context.Context) {
	if !setting.Attachment.Enabled {
		ctx.Error(404, "attachment is not enabled")
		return
	}
	err := upload.VerifyFileType(ctx.Query("fileType"), strings.Split(setting.Attachment.AllowedTypes, ","))
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}
	typeCloudBrain := ctx.QueryInt("type")
	err = checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}
	fileName := ctx.Query("file_name")
	if setting.Attachment.StoreType == storage.MinioStorageType {
		totalChunkCounts := ctx.QueryInt("totalChunkCounts")
		if totalChunkCounts > minio_ext.MaxPartsCount {
			ctx.Error(400, fmt.Sprintf("chunk counts(%d) is too much", totalChunkCounts))
			return
		}
		fileSize := ctx.QueryInt64("size")
		if fileSize > minio_ext.MaxMultipartPutObjectSize {
			ctx.Error(400, fmt.Sprintf("file size(%d) is too big", fileSize))
			return
		}
		uuid := gouuid.NewV4().String()
		var uploadID string
		if typeCloudBrain == models.TypeCloudBrainOne {
			uploadID, err = storage.NewMultiPartUpload(uuid)
			if err != nil {
				ctx.ServerError("NewMultipart", err)
				return
			}
		} else {
			uploadID, err = storage.NewObsMultiPartUpload(uuid, fileName)
			if err != nil {
				ctx.ServerError("NewObsMultiPartUpload", err)
				return
			}
		}
		_, err = models.InsertFileChunk(&models.FileChunk{
			UUID:        uuid,
			UserID:      ctx.User.ID,
			UploadID:    uploadID,
			Md5:         ctx.Query("md5"),
			Size:        fileSize,
			TotalChunks: totalChunkCounts,
			Type:        typeCloudBrain,
		})
		if err != nil {
			ctx.Error(500, fmt.Sprintf("InsertFileChunk: %v", err))
			return
		}
		ctx.JSON(200, map[string]string{
			"uuid":     uuid,
			"uploadID": uploadID,
		})
	} else {
		ctx.Error(404, "storage type is not enabled")
		return
	}
}
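// PutOBSProxyUpload and GetOBSProxyDownload proxy part uploads and downloads
// through this server when setting.PROXYURL is configured (see the URLs built
// in GetMultipartUploadUrl and GetAttachment), presumably so that clients
// without direct access to OBS can still transfer data.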
func PutOBSProxyUpload(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	uploadID := ctx.Query("uploadId")
	partNumber := ctx.QueryInt("partNumber")
	fileName := ctx.Query("file_name")
	RequestBody := ctx.Req.Body()
	if RequestBody == nil {
		ctx.Error(500, "request body is nil")
		return
	}
	err := storage.ObsMultiPartUpload(uuid, uploadID, partNumber, fileName, RequestBody.ReadCloser())
	if err != nil {
		log.Info("upload error.")
	}
}

func GetOBSProxyDownload(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	fileName := ctx.Query("file_name")
	body, err := storage.ObsDownload(uuid, fileName)
	if err != nil {
		log.Info("download error.")
	} else {
		defer body.Close()
		ctx.Resp.Header().Set("Content-Disposition", "attachment; filename="+fileName)
		ctx.Resp.Header().Set("Content-Type", "application/octet-stream")
		p := make([]byte, 1024)
		var readErr error
		var readCount int
		// read the object content and stream it to the response
		for {
			readCount, readErr = body.Read(p)
			if readCount > 0 {
				ctx.Resp.Write(p[:readCount])
			}
			if readErr != nil {
				break
			}
		}
	}
}
func GetMultipartUploadUrl(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	uploadID := ctx.Query("uploadID")
	partNumber := ctx.QueryInt("chunkNumber")
	size := ctx.QueryInt64("size")
	fileName := ctx.Query("file_name")
	typeCloudBrain := ctx.QueryInt("type")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}
	url := ""
	if typeCloudBrain == models.TypeCloudBrainOne {
		if size > minio_ext.MinPartSize {
			ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size))
			return
		}
		url, err = storage.GenMultiPartSignedUrl(uuid, uploadID, partNumber, size)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err))
			return
		}
	} else {
		if setting.PROXYURL != "" {
			url = setting.PROXYURL + "/obs_proxy_multipart?uuid=" + uuid + "&uploadId=" + uploadID + "&partNumber=" + fmt.Sprint(partNumber) + "&file_name=" + fileName
			log.Info("return url=" + url)
		} else {
			url, err = storage.ObsGenMultiPartSignedUrl(uuid, uploadID, partNumber, fileName)
			if err != nil {
				ctx.Error(500, fmt.Sprintf("ObsGenMultiPartSignedUrl failed: %v", err))
				return
			}
			log.Info("url=" + url)
		}
	}
	ctx.JSON(200, map[string]string{
		"url": url,
	})
}

func GetObsKey(ctx *context.Context) {
	uuid := gouuid.NewV4().String()
	key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/")
	ctx.JSON(200, map[string]string{
		"uuid":              uuid,
		"key":               key,
		"access_key_id":     setting.AccessKeyID,
		"secret_access_key": setting.SecretAccessKey,
		"server":            setting.Endpoint,
		"bucket":            setting.Bucket,
	})
}

func CompleteMultipart(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	uploadID := ctx.Query("uploadID")
	typeCloudBrain := ctx.QueryInt("type")
	fileName := ctx.Query("file_name")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}
	fileChunk, err := models.GetFileChunkByUUID(uuid)
	if err != nil {
		if models.IsErrFileChunkNotExist(err) {
			ctx.Error(404)
		} else {
			ctx.ServerError("GetFileChunkByUUID", err)
		}
		return
	}
	if typeCloudBrain == models.TypeCloudBrainOne {
		_, err = storage.CompleteMultiPartUpload(uuid, uploadID)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err))
			return
		}
	} else {
		err = storage.CompleteObsMultiPartUpload(uuid, uploadID, fileName)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("CompleteObsMultiPartUpload failed: %v", err))
			return
		}
	}
	fileChunk.IsUploaded = models.FileUploaded
	err = models.UpdateFileChunk(fileChunk)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
		return
	}
	dataset, _ := models.GetDatasetByID(ctx.QueryInt64("dataset_id"))
	attachment, err := models.InsertAttachment(&models.Attachment{
		UUID:        uuid,
		UploaderID:  ctx.User.ID,
		IsPrivate:   dataset.IsPrivate(),
		Name:        fileName,
		Size:        ctx.QueryInt64("size"),
		DatasetID:   ctx.QueryInt64("dataset_id"),
		Description: ctx.Query("description"),
		Type:        typeCloudBrain,
	})
	if err != nil {
		ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
		return
	}
	repository, _ := models.GetRepositoryByID(dataset.RepoID)
	notification.NotifyOtherTask(ctx.User, repository, fmt.Sprint(attachment.Type), attachment.Name, models.ActionUploadAttachment)
	if attachment.DatasetID != 0 {
		if isCanDecompress(attachment.Name) {
			if typeCloudBrain == models.TypeCloudBrainOne {
				err = worker.SendDecompressTask(contexExt.Background(), uuid, attachment.Name)
				if err != nil {
					log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
				} else {
					attachment.DecompressState = models.DecompressStateIng
					err = models.UpdateAttachment(attachment)
					if err != nil {
						log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error())
					}
				}
			}
			if typeCloudBrain == models.TypeCloudBrainTwo {
				attachjson, _ := json.Marshal(attachment)
				labelmsg.SendDecompressAttachToLabelOBS(string(attachjson))
			}
		} else {
			labelMap := make(map[string]string)
			labelMap["UUID"] = uuid
			labelMap["Type"] = fmt.Sprint(attachment.Type)
			labelMap["UploaderID"] = fmt.Sprint(attachment.UploaderID)
			labelMap["RepoID"] = fmt.Sprint(dataset.RepoID)
			labelMap["AttachName"] = attachment.Name
			attachjson, _ := json.Marshal(labelMap)
			labelmsg.SendAddAttachToLabelSys(string(attachjson))
		}
	}
	ctx.JSON(200, map[string]string{
		"result_code": "0",
	})
}

func UpdateMultipart(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	partNumber := ctx.QueryInt("chunkNumber")
	etag := ctx.Query("etag")
	fileChunk, err := models.GetFileChunkByUUID(uuid)
	if err != nil {
		if models.IsErrFileChunkNotExist(err) {
			ctx.Error(404)
		} else {
			ctx.ServerError("GetFileChunkByUUID", err)
		}
		return
	}
	fileChunk.CompletedParts = append(fileChunk.CompletedParts, strconv.Itoa(partNumber)+"-"+strings.Replace(etag, "\"", "", -1))
	err = models.UpdateFileChunk(fileChunk)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
		return
	}
	ctx.JSON(200, map[string]string{
		"result_code": "0",
	})
}
func HandleUnDecompressAttachment() {
	attachs, err := models.GetUnDecompressAttachments()
	if err != nil {
		log.Error("GetUnDecompressAttachments failed: %v", err.Error())
		return
	}
	for _, attach := range attachs {
		if attach.Type == models.TypeCloudBrainOne {
			err = worker.SendDecompressTask(contexExt.Background(), attach.UUID, attach.Name)
			if err != nil {
				log.Error("SendDecompressTask(%s) failed:%s", attach.UUID, err.Error())
			} else {
				attach.DecompressState = models.DecompressStateIng
				err = models.UpdateAttachment(attach)
				if err != nil {
					log.Error("UpdateAttachment state(%s) failed:%s", attach.UUID, err.Error())
				}
			}
		} else if attach.Type == models.TypeCloudBrainTwo {
			attachjson, _ := json.Marshal(attach)
			labelmsg.SendDecompressAttachToLabelOBS(string(attachjson))
		}
	}
}
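// QueryAllPublicDataset and QueryPrivateDataset expose public and per-user
// attachments to the CloudBrain service; both reply with the
// result_code / error_msg / data envelope built in queryDatasets below.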
func QueryAllPublicDataset(ctx *context.Context) {
	attachs, err := models.GetAllPublicAttachments()
	if err != nil {
		ctx.JSON(200, map[string]string{
			"result_code": "-1",
			"error_msg":   err.Error(),
			"data":        "",
		})
		return
	}
	queryDatasets(ctx, attachs)
}

func QueryPrivateDataset(ctx *context.Context) {
	username := ctx.Params(":username")
	attachs, err := models.GetPrivateAttachments(username)
	if err != nil {
		ctx.JSON(200, map[string]string{
			"result_code": "-1",
			"error_msg":   err.Error(),
			"data":        "",
		})
		return
	}
	for _, attach := range attachs {
		attach.Name = username
	}
	queryDatasets(ctx, attachs)
}
func queryDatasets(ctx *context.Context, attachs []*models.AttachmentUsername) {
	var datasets []CloudBrainDataset
	if len(attachs) == 0 {
		log.Info("dataset is null")
		ctx.JSON(200, map[string]string{
			"result_code": "0",
			"error_msg":   "",
			"data":        "",
		})
		return
	}
	for _, attch := range attachs {
		has, err := storage.Attachments.HasObject(models.AttachmentRelativePath(attch.UUID))
		if err != nil || !has {
			continue
		}
		datasets = append(datasets, CloudBrainDataset{
			strconv.FormatInt(attch.ID, 10),
			attch.Attachment.Name,
			setting.Attachment.Minio.RealPath +
				setting.Attachment.Minio.Bucket + "/" +
				setting.Attachment.Minio.BasePath +
				models.AttachmentRelativePath(attch.UUID) +
				attch.UUID,
			attch.Name,
			attch.CreatedUnix.Format("2006-01-02 03:04:05 PM"),
		})
	}
	data, err := json.Marshal(datasets)
	if err != nil {
		log.Error("json.Marshal failed: %v", err.Error())
		ctx.JSON(200, map[string]string{
			"result_code": "-1",
			"error_msg":   err.Error(),
			"data":        "",
		})
		return
	}
	ctx.JSON(200, map[string]string{
		"result_code": "0",
		"error_msg":   "",
		"data":        string(data),
	})
}

func checkTypeCloudBrain(typeCloudBrain int) error {
	if typeCloudBrain != models.TypeCloudBrainOne && typeCloudBrain != models.TypeCloudBrainTwo {
		log.Error("type error: %d", typeCloudBrain)
		return errors.New("type error")
	}
	return nil
}