
attachment.go 27 kB

// Copyright 2017 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package repo

import (
	contexExt "context"
	"encoding/json"
	"errors"
	"fmt"
	"mime/multipart"
	"net/http"
	"path"
	"strconv"
	"strings"

	"code.gitea.io/gitea/models"
	"code.gitea.io/gitea/modules/context"
	"code.gitea.io/gitea/modules/labelmsg"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/minio_ext"
	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/storage"
	"code.gitea.io/gitea/modules/upload"
	"code.gitea.io/gitea/modules/worker"

	gouuid "github.com/satori/go.uuid"
)

const (
	// result of decompress
	DecompressSuccess = "0"
	DecompressFailed  = "1"
)
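
// CloudBrainDataset describes one dataset entry as serialized by queryDatasets.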
type CloudBrainDataset struct {
	UUID       string `json:"id"`
	Name       string `json:"name"`
	Path       string `json:"place"`
	UserName   string `json:"provider"`
	CreateTime string `json:"created_at"`
}
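
// UploadForm binds the parameters of a chunked-upload request.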
type UploadForm struct {
	UploadID   string         `form:"uploadId"`
	UuID       string         `form:"uuid"`
	PartSize   int64          `form:"size"`
	Offset     int64          `form:"offset"`
	PartNumber int            `form:"chunkNumber"`
	PartFile   multipart.File `form:"file"`
}
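
// RenderAttachmentSettings copies the attachment settings into the rendering context.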
func RenderAttachmentSettings(ctx *context.Context) {
	renderAttachmentSettings(ctx)
}

func renderAttachmentSettings(ctx *context.Context) {
	ctx.Data["IsAttachmentEnabled"] = setting.Attachment.Enabled
	ctx.Data["AttachmentStoreType"] = setting.Attachment.StoreType
	ctx.Data["AttachmentAllowedTypes"] = setting.Attachment.AllowedTypes
	ctx.Data["AttachmentMaxSize"] = setting.Attachment.MaxSize
	ctx.Data["AttachmentMaxFiles"] = setting.Attachment.MaxFiles
}

// UploadAttachment responds to a request to upload an issue's attachment.
func UploadAttachment(ctx *context.Context) {
	if !setting.Attachment.Enabled {
		ctx.Error(404, "attachment is not enabled")
		return
	}

	file, header, err := ctx.Req.FormFile("file")
	if err != nil {
		ctx.Error(500, fmt.Sprintf("FormFile: %v", err))
		return
	}
	defer file.Close()

	buf := make([]byte, 1024)
	n, _ := file.Read(buf)
	if n > 0 {
		buf = buf[:n]
	}

	err = upload.VerifyAllowedContentType(buf, strings.Split(setting.Attachment.AllowedTypes, ","))
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}

	datasetID, _ := strconv.ParseInt(ctx.Req.FormValue("dataset_id"), 10, 64)
	attach, err := models.NewAttachment(&models.Attachment{
		IsPrivate:  true,
		UploaderID: ctx.User.ID,
		Name:       header.Filename,
		DatasetID:  datasetID,
	}, buf, file)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("NewAttachment: %v", err))
		return
	}

	log.Trace("New attachment uploaded: %s", attach.UUID)
	ctx.JSON(200, map[string]string{
		"uuid": attach.UUID,
	})
}
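
// UpdatePublicAttachment sets the private flag of the attachment identified by the "file" query parameter.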
func UpdatePublicAttachment(ctx *context.Context) {
	file := ctx.Query("file")
	isPrivate, _ := strconv.ParseBool(ctx.Query("is_private"))
	attach, err := models.GetAttachmentByUUID(file)
	if err != nil {
		ctx.Error(404, err.Error())
		return
	}

	attach.IsPrivate = isPrivate
	models.UpdateAttachment(attach)
}

// DeleteAttachment responds to a request to delete an issue's attachment.
func DeleteAttachment(ctx *context.Context) {
	file := ctx.Query("file")
	attach, err := models.GetAttachmentByUUID(file)
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}

	// issue 214: mod del-dataset permission
	if !models.CanDelAttachment(ctx.IsSigned, ctx.User, attach) {
		ctx.Error(403)
		return
	}

	err = models.DeleteAttachment(attach, true)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("DeleteAttachment: %v", err))
		return
	}

	attachjson, _ := json.Marshal(attach)
	labelmsg.SendDeleteAttachToLabelSys(string(attachjson))

	DeleteAllUnzipFile(attach, "")

	_, err = models.DeleteFileChunkById(attach.UUID)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("DeleteFileChunkById: %v", err))
		return
	}

	ctx.JSON(200, map[string]string{
		"uuid": attach.UUID,
	})
}
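
// DownloadUserIsOrgOrCollaboration reports whether the current user may download the attachment
// because they belong to the owning organization or collaborate on the repository of its dataset.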
func DownloadUserIsOrgOrCollaboration(ctx *context.Context, attach *models.Attachment) bool {
	dataset, err := models.GetDatasetByID(attach.DatasetID)
	if err != nil {
		log.Info("query dataset error")
	} else {
		repo, err := models.GetRepositoryByID(dataset.RepoID)
		if err != nil {
			log.Info("query repo error.")
		} else {
			repo.GetOwner()
			if ctx.User != nil {
				if repo.Owner.IsOrganization() {
					if repo.Owner.IsUserPartOfOrg(ctx.User.ID) {
						log.Info("org user may visit the attach.")
						return true
					}
				}

				isCollaborator, _ := repo.IsCollaborator(ctx.User.ID)
				if isCollaborator {
					log.Info("Collaborator user may visit the attach.")
					return true
				}
			}
		}
	}

	return false
}

// GetAttachment serves attachments.
func GetAttachment(ctx *context.Context) {
	typeCloudBrain := ctx.QueryInt("type")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	attach, err := models.GetAttachmentByUUID(ctx.Params(":uuid"))
	if err != nil {
		if models.IsErrAttachmentNotExist(err) {
			ctx.Error(404)
		} else {
			ctx.ServerError("GetAttachmentByUUID", err)
		}
		return
	}

	repository, unitType, err := attach.LinkedRepository()
	if err != nil {
		ctx.ServerError("LinkedRepository", err)
		return
	}

	dataSet, err := attach.LinkedDataSet()
	if err != nil {
		ctx.ServerError("LinkedDataSet", err)
		return
	}

	if repository == nil && dataSet != nil {
		repository, _ = models.GetRepositoryByID(dataSet.RepoID)
		unitType = models.UnitTypeDatasets
	}

	if repository == nil { // If not linked
		//if !(ctx.IsSigned && attach.UploaderID == ctx.User.ID) && attach.IsPrivate { //We block if not the uploader
		//log.Info("ctx.IsSigned =" + fmt.Sprintf("%v", ctx.IsSigned))
		if !(ctx.IsSigned && attach.UploaderID == ctx.User.ID) && attach.IsPrivate && !DownloadUserIsOrgOrCollaboration(ctx, attach) { // We block if not the uploader
			ctx.Error(http.StatusNotFound)
			return
		}
	} else { // If we have the repository we check access
		perm, errPermission := models.GetUserRepoPermission(repository, ctx.User)
		if errPermission != nil {
			ctx.Error(http.StatusInternalServerError, "GetUserRepoPermission", errPermission.Error())
			return
		}
		if !perm.CanRead(unitType) {
			ctx.Error(http.StatusNotFound)
			return
		}
	}

	if dataSet != nil {
		isPermit, err := models.GetUserDataSetPermission(dataSet, ctx.User)
		if err != nil {
			ctx.Error(http.StatusInternalServerError, "GetUserDataSetPermission", err.Error())
			return
		}
		if !isPermit {
			ctx.Error(http.StatusNotFound)
			return
		}
	}

	// If we have matched and access to release or issue
	if setting.Attachment.StoreType == storage.MinioStorageType {
		url := ""
		if typeCloudBrain == models.TypeCloudBrainOne {
			url, err = storage.Attachments.PresignedGetURL(setting.Attachment.Minio.BasePath+attach.RelativePath(), attach.Name)
			if err != nil {
				ctx.ServerError("PresignedGetURL", err)
				return
			}
		} else {
			if setting.PROXYURL != "" {
				url = setting.PROXYURL + "/obs_proxy_download?uuid=" + attach.UUID + "&file_name=" + attach.Name
				log.Info("return url=" + url)
			} else {
				url, err = storage.ObsGetPreSignedUrl(attach.UUID, attach.Name)
				if err != nil {
					ctx.ServerError("ObsGetPreSignedUrl", err)
					return
				}
			}
		}

		if err = increaseDownloadCount(attach, dataSet); err != nil {
			ctx.ServerError("Update", err)
			return
		}

		if dataSet != nil {
			http.Redirect(ctx.Resp, ctx.Req.Request, url, http.StatusMovedPermanently)
		} else {
			fr, err := storage.Attachments.Open(attach.RelativePath())
			if err != nil {
				ctx.ServerError("Open", err)
				return
			}
			defer fr.Close()

			if err = ServeData(ctx, attach.Name, fr); err != nil {
				ctx.ServerError("ServeData", err)
				return
			}
		}
	} else {
		fr, err := storage.Attachments.Open(attach.RelativePath())
		if err != nil {
			ctx.ServerError("Open", err)
			return
		}
		defer fr.Close()

		if err = increaseDownloadCount(attach, dataSet); err != nil {
			ctx.ServerError("Update", err)
			return
		}

		if err = ServeData(ctx, attach.Name, fr); err != nil {
			ctx.ServerError("ServeData", err)
			return
		}
	}
}
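
// increaseDownloadCount increments the download counter of the attachment and, if it belongs to a dataset, of the dataset as well.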
func increaseDownloadCount(attach *models.Attachment, dataSet *models.Dataset) error {
	if err := attach.IncreaseDownloadCount(); err != nil {
		return err
	}

	if dataSet != nil {
		if err := models.IncreaseDownloadCount(dataSet.ID); err != nil {
			return err
		}
	}

	return nil
}

// GetPresignedPutObjectURL returns a presigned URL for putting an object.
func GetPresignedPutObjectURL(ctx *context.Context) {
	if !setting.Attachment.Enabled {
		ctx.Error(404, "attachment is not enabled")
		return
	}

	err := upload.VerifyFileType(ctx.Params("file_type"), strings.Split(setting.Attachment.AllowedTypes, ","))
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}

	if setting.Attachment.StoreType == storage.MinioStorageType {
		uuid := gouuid.NewV4().String()
		url, err := storage.Attachments.PresignedPutURL(models.AttachmentRelativePath(uuid))
		if err != nil {
			ctx.ServerError("PresignedPutURL", err)
			return
		}
		ctx.JSON(200, map[string]string{
			"uuid": uuid,
			"url":  url,
		})
	} else {
		ctx.Error(404, "storage type is not enabled")
		return
	}
}

// AddAttachment responds to a request to add an attachment record.
func AddAttachment(ctx *context.Context) {
	typeCloudBrain := ctx.QueryInt("type")
	fileName := ctx.Query("file_name")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	uuid := ctx.Query("uuid")
	has := false
	if typeCloudBrain == models.TypeCloudBrainOne {
		has, err = storage.Attachments.HasObject(models.AttachmentRelativePath(uuid))
		if err != nil {
			ctx.ServerError("HasObject", err)
			return
		}
	} else {
		has, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(uuid) + "/" + fileName)
		if err != nil {
			ctx.ServerError("ObsHasObject", err)
			return
		}
	}

	if !has {
		ctx.Error(404, "attachment has not been uploaded")
		return
	}

	attachment, err := models.InsertAttachment(&models.Attachment{
		UUID:       uuid,
		UploaderID: ctx.User.ID,
		IsPrivate:  true,
		Name:       fileName,
		Size:       ctx.QueryInt64("size"),
		DatasetID:  ctx.QueryInt64("dataset_id"),
		Type:       typeCloudBrain,
	})
	if err != nil {
		ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
		return
	}

	if attachment.DatasetID != 0 {
		if isCanDecompress(attachment.Name) {
			if typeCloudBrain == models.TypeCloudBrainOne {
				err = worker.SendDecompressTask(contexExt.Background(), uuid, attachment.Name)
				if err != nil {
					log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
				} else {
					attachment.DecompressState = models.DecompressStateIng
					err = models.UpdateAttachment(attachment)
					if err != nil {
						log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error())
					}
				}
			}
			//todo: decompress type_two
		}
	}

	ctx.JSON(200, map[string]string{
		"result_code": "0",
	})
}
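
// isCanDecompress reports whether the file name carries an archive suffix (.zip, .tar.gz, .tgz) that the decompress worker can handle.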
func isCanDecompress(name string) bool {
	if strings.HasSuffix(name, ".zip") || strings.HasSuffix(name, ".tar.gz") || strings.HasSuffix(name, ".tgz") {
		return true
	}
	return false
}
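
// UpdateAttachmentDecompressState stores the decompression result reported by the worker and notifies the label system.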
func UpdateAttachmentDecompressState(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	result := ctx.Query("result")

	attach, err := models.GetAttachmentByUUID(uuid)
	if err != nil {
		log.Error("GetAttachmentByUUID(%s) failed:%s", uuid, err.Error())
		return
	}

	if result == DecompressSuccess {
		attach.DecompressState = models.DecompressStateDone
	} else if result == DecompressFailed {
		attach.DecompressState = models.DecompressStateFailed
	} else {
		log.Error("result is error:", result)
		return
	}

	err = models.UpdateAttachment(attach)
	if err != nil {
		log.Error("UpdateAttachment(%s) failed:%s", uuid, err.Error())
		return
	}

	log.Info("start to send msg to labelsystem ")
	dataset, _ := models.GetDatasetByID(attach.DatasetID)

	var labelMap map[string]string
	labelMap = make(map[string]string)
	labelMap["UUID"] = uuid
	labelMap["Type"] = fmt.Sprint(attach.Type)
	labelMap["UploaderID"] = fmt.Sprint(attach.UploaderID)
	labelMap["RepoID"] = fmt.Sprint(dataset.RepoID)
	labelMap["AttachName"] = attach.Name
	attachjson, _ := json.Marshal(labelMap)
	labelmsg.SendAddAttachToLabelSys(string(attachjson))

	log.Info("end to send msg to labelsystem ")

	ctx.JSON(200, map[string]string{
		"result_code": "0",
	})
}
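
// GetSuccessChunks looks up the resumable-upload state for the current user's file (by MD5)
// and returns the already-uploaded chunks together with attachment and dataset information, if any.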
func GetSuccessChunks(ctx *context.Context) {
	fileMD5 := ctx.Query("md5")
	typeCloudBrain := ctx.QueryInt("type")
	fileName := ctx.Query("file_name")
	var chunks string

	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	fileChunk, err := models.GetFileChunkByMD5AndUser(fileMD5, ctx.User.ID, typeCloudBrain)
	if err != nil {
		if models.IsErrFileChunkNotExist(err) {
			ctx.JSON(200, map[string]string{
				"uuid":     "",
				"uploaded": "0",
				"uploadID": "",
				"chunks":   "",
			})
		} else {
			ctx.ServerError("GetFileChunkByMD5", err)
		}
		return
	}

	isExist := false
	if typeCloudBrain == models.TypeCloudBrainOne {
		isExist, err = storage.Attachments.HasObject(models.AttachmentRelativePath(fileChunk.UUID))
		if err != nil {
			ctx.ServerError("HasObject failed", err)
			return
		}
	} else {
		isExist, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(fileChunk.UUID) + "/" + fileName)
		if err != nil {
			ctx.ServerError("ObsHasObject failed", err)
			return
		}
	}

	if isExist {
		if fileChunk.IsUploaded == models.FileNotUploaded {
			log.Info("the file has been uploaded but not recorded")
			fileChunk.IsUploaded = models.FileUploaded
			if err = models.UpdateFileChunk(fileChunk); err != nil {
				log.Error("UpdateFileChunk failed:", err.Error())
			}
		}
	} else {
		if fileChunk.IsUploaded == models.FileUploaded {
			log.Info("the file has been recorded but not uploaded")
			fileChunk.IsUploaded = models.FileNotUploaded
			if err = models.UpdateFileChunk(fileChunk); err != nil {
				log.Error("UpdateFileChunk failed:", err.Error())
			}
		}

		if typeCloudBrain == models.TypeCloudBrainOne {
			chunks, err = storage.GetPartInfos(fileChunk.UUID, fileChunk.UploadID)
			if err != nil {
				log.Error("GetPartInfos failed:%v", err.Error())
			}
		} else {
			chunks, err = storage.GetObsPartInfos(fileChunk.UUID, fileChunk.UploadID)
			if err != nil {
				log.Error("GetObsPartInfos failed:%v", err.Error())
			}
		}

		if err != nil {
			models.DeleteFileChunk(fileChunk)
			ctx.JSON(200, map[string]string{
				"uuid":     "",
				"uploaded": "0",
				"uploadID": "",
				"chunks":   "",
			})
			return
		}
	}

	var attachID int64
	attach, err := models.GetAttachmentByUUID(fileChunk.UUID)
	if err != nil {
		if models.IsErrAttachmentNotExist(err) {
			attachID = 0
		} else {
			ctx.ServerError("GetAttachmentByUUID", err)
			return
		}
	} else {
		attachID = attach.ID
	}

	if attach == nil {
		ctx.JSON(200, map[string]string{
			"uuid":        fileChunk.UUID,
			"uploaded":    strconv.Itoa(fileChunk.IsUploaded),
			"uploadID":    fileChunk.UploadID,
			"chunks":      string(chunks),
			"attachID":    "0",
			"datasetID":   "0",
			"fileName":    "",
			"datasetName": "",
		})
		return
	}

	dataset, err := models.GetDatasetByID(attach.DatasetID)
	if err != nil {
		ctx.ServerError("GetDatasetByID", err)
		return
	}

	ctx.JSON(200, map[string]string{
		"uuid":        fileChunk.UUID,
		"uploaded":    strconv.Itoa(fileChunk.IsUploaded),
		"uploadID":    fileChunk.UploadID,
		"chunks":      string(chunks),
		"attachID":    strconv.Itoa(int(attachID)),
		"datasetID":   strconv.Itoa(int(attach.DatasetID)),
		"fileName":    attach.Name,
		"datasetName": dataset.Title,
	})
}
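
// NewMultipart starts a multipart upload on MinIO or OBS and records the corresponding file chunk.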
func NewMultipart(ctx *context.Context) {
	if !setting.Attachment.Enabled {
		ctx.Error(404, "attachment is not enabled")
		return
	}

	err := upload.VerifyFileType(ctx.Query("fileType"), strings.Split(setting.Attachment.AllowedTypes, ","))
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}

	typeCloudBrain := ctx.QueryInt("type")
	err = checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	fileName := ctx.Query("file_name")

	if setting.Attachment.StoreType == storage.MinioStorageType {
		totalChunkCounts := ctx.QueryInt("totalChunkCounts")
		if totalChunkCounts > minio_ext.MaxPartsCount {
			ctx.Error(400, fmt.Sprintf("chunk counts(%d) is too much", totalChunkCounts))
			return
		}

		fileSize := ctx.QueryInt64("size")
		if fileSize > minio_ext.MaxMultipartPutObjectSize {
			ctx.Error(400, fmt.Sprintf("file size(%d) is too big", fileSize))
			return
		}

		uuid := gouuid.NewV4().String()
		var uploadID string
		if typeCloudBrain == models.TypeCloudBrainOne {
			uploadID, err = storage.NewMultiPartUpload(uuid)
			if err != nil {
				ctx.ServerError("NewMultipart", err)
				return
			}
		} else {
			uploadID, err = storage.NewObsMultiPartUpload(uuid, fileName)
			if err != nil {
				ctx.ServerError("NewObsMultiPartUpload", err)
				return
			}
		}

		_, err = models.InsertFileChunk(&models.FileChunk{
			UUID:        uuid,
			UserID:      ctx.User.ID,
			UploadID:    uploadID,
			Md5:         ctx.Query("md5"),
			Size:        fileSize,
			TotalChunks: totalChunkCounts,
			Type:        typeCloudBrain,
		})
		if err != nil {
			ctx.Error(500, fmt.Sprintf("InsertFileChunk: %v", err))
			return
		}

		ctx.JSON(200, map[string]string{
			"uuid":     uuid,
			"uploadID": uploadID,
		})
	} else {
		ctx.Error(404, "storage type is not enabled")
		return
	}
}
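
// PutOBSProxyUpload proxies the request body to OBS as one part of a multipart upload.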
func PutOBSProxyUpload(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	uploadID := ctx.Query("uploadId")
	partNumber := ctx.QueryInt("partNumber")
	fileName := ctx.Query("file_name")

	RequestBody := ctx.Req.Body()
	if RequestBody == nil {
		ctx.Error(500, fmt.Sprintf("FormFile: %v", RequestBody))
		return
	}

	err := storage.ObsMultiPartUpload(uuid, uploadID, partNumber, fileName, RequestBody.ReadCloser())
	if err != nil {
		log.Info("upload error.")
	}
}
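
// GetOBSProxyDownload streams an object from OBS back to the client as a file download.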
func GetOBSProxyDownload(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	fileName := ctx.Query("file_name")

	body, err := storage.ObsDownload(uuid, fileName)
	if err != nil {
		log.Info("download error.")
	} else {
		defer body.Close()
		ctx.Resp.Header().Set("Content-Disposition", "attachment; filename="+fileName)
		ctx.Resp.Header().Set("Content-Type", "application/octet-stream")

		p := make([]byte, 1024)
		var readErr error
		var readCount int
		// read the object content and write it to the response
		for {
			readCount, readErr = body.Read(p)
			if readCount > 0 {
				ctx.Resp.Write(p[:readCount])
				//fmt.Printf("%s", p[:readCount])
			}
			if readErr != nil {
				break
			}
		}
	}
}
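
// GetMultipartUploadUrl returns the URL for uploading a single part: either a presigned storage URL
// or the proxy endpoint when PROXYURL is configured.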
func GetMultipartUploadUrl(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	uploadID := ctx.Query("uploadID")
	partNumber := ctx.QueryInt("chunkNumber")
	size := ctx.QueryInt64("size")
	fileName := ctx.Query("file_name")

	typeCloudBrain := ctx.QueryInt("type")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	url := ""
	if typeCloudBrain == models.TypeCloudBrainOne {
		if size > minio_ext.MinPartSize {
			ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size))
			return
		}

		url, err = storage.GenMultiPartSignedUrl(uuid, uploadID, partNumber, size)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err))
			return
		}
	} else {
		if setting.PROXYURL != "" {
			url = setting.PROXYURL + "/obs_proxy_multipart?uuid=" + uuid + "&uploadId=" + uploadID + "&partNumber=" + fmt.Sprint(partNumber) + "&file_name=" + fileName
			log.Info("return url=" + url)
		} else {
			url, err = storage.ObsGenMultiPartSignedUrl(uuid, uploadID, partNumber, fileName)
			if err != nil {
				ctx.Error(500, fmt.Sprintf("ObsGenMultiPartSignedUrl failed: %v", err))
				return
			}
			log.Info("url=" + url)
		}
	}

	ctx.JSON(200, map[string]string{
		"url": url,
	})
}
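
// GetObsKey generates a fresh UUID-based object key and returns it together with the OBS endpoint and credentials.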
func GetObsKey(ctx *context.Context) {
	uuid := gouuid.NewV4().String()
	key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/")

	ctx.JSON(200, map[string]string{
		"uuid":              uuid,
		"key":               key,
		"access_key_id":     setting.AccessKeyID,
		"secret_access_key": setting.SecretAccessKey,
		"server":            setting.Endpoint,
		"bucket":            setting.Bucket,
	})
}
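
// CompleteMultipart finishes the multipart upload, inserts the attachment record and,
// where applicable, triggers decompression or a label-system notification.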
func CompleteMultipart(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	uploadID := ctx.Query("uploadID")
	typeCloudBrain := ctx.QueryInt("type")
	fileName := ctx.Query("file_name")

	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	fileChunk, err := models.GetFileChunkByUUID(uuid)
	if err != nil {
		if models.IsErrFileChunkNotExist(err) {
			ctx.Error(404)
		} else {
			ctx.ServerError("GetFileChunkByUUID", err)
		}
		return
	}

	if typeCloudBrain == models.TypeCloudBrainOne {
		_, err = storage.CompleteMultiPartUpload(uuid, uploadID)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err))
			return
		}
	} else {
		err = storage.CompleteObsMultiPartUpload(uuid, uploadID, fileName)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("CompleteObsMultiPartUpload failed: %v", err))
			return
		}
	}

	fileChunk.IsUploaded = models.FileUploaded

	err = models.UpdateFileChunk(fileChunk)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
		return
	}

	attachment, err := models.InsertAttachment(&models.Attachment{
		UUID:       uuid,
		UploaderID: ctx.User.ID,
		IsPrivate:  true,
		Name:       fileName,
		Size:       ctx.QueryInt64("size"),
		DatasetID:  ctx.QueryInt64("dataset_id"),
		Type:       typeCloudBrain,
	})
	if err != nil {
		ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
		return
	}

	if attachment.DatasetID != 0 {
		if isCanDecompress(attachment.Name) {
			if typeCloudBrain == models.TypeCloudBrainOne {
				err = worker.SendDecompressTask(contexExt.Background(), uuid, attachment.Name)
				if err != nil {
					log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
				} else {
					attachment.DecompressState = models.DecompressStateIng
					err = models.UpdateAttachment(attachment)
					if err != nil {
						log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error())
					}
				}
			}
			if typeCloudBrain == models.TypeCloudBrainTwo {
				attachjson, _ := json.Marshal(attachment)
				labelmsg.SendDecompressAttachToLabelOBS(string(attachjson))
			}
		} else {
			dataset, _ := models.GetDatasetByID(attachment.DatasetID)
			var labelMap map[string]string
			labelMap = make(map[string]string)
			labelMap["UUID"] = uuid
			labelMap["Type"] = fmt.Sprint(attachment.Type)
			labelMap["UploaderID"] = fmt.Sprint(attachment.UploaderID)
			labelMap["RepoID"] = fmt.Sprint(dataset.RepoID)
			labelMap["AttachName"] = attachment.Name
			attachjson, _ := json.Marshal(labelMap)
			labelmsg.SendAddAttachToLabelSys(string(attachjson))
		}
	}

	ctx.JSON(200, map[string]string{
		"result_code": "0",
	})
}
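
// UpdateMultipart records the ETag of a completed part in the file chunk.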
func UpdateMultipart(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	partNumber := ctx.QueryInt("chunkNumber")
	etag := ctx.Query("etag")

	fileChunk, err := models.GetFileChunkByUUID(uuid)
	if err != nil {
		if models.IsErrFileChunkNotExist(err) {
			ctx.Error(404)
		} else {
			ctx.ServerError("GetFileChunkByUUID", err)
		}
		return
	}

	fileChunk.CompletedParts = append(fileChunk.CompletedParts, strconv.Itoa(partNumber)+"-"+strings.Replace(etag, "\"", "", -1))

	err = models.UpdateFileChunk(fileChunk)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
		return
	}

	ctx.JSON(200, map[string]string{
		"result_code": "0",
	})
}
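
// HandleUnDecompressAttachment re-dispatches decompression for attachments that are still waiting to be decompressed.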
func HandleUnDecompressAttachment() {
	attachs, err := models.GetUnDecompressAttachments()
	if err != nil {
		log.Error("GetUnDecompressAttachments failed:", err.Error())
		return
	}

	for _, attach := range attachs {
		if attach.Type == models.TypeCloudBrainOne {
			err = worker.SendDecompressTask(contexExt.Background(), attach.UUID, attach.Name)
			if err != nil {
				log.Error("SendDecompressTask(%s) failed:%s", attach.UUID, err.Error())
			} else {
				attach.DecompressState = models.DecompressStateIng
				err = models.UpdateAttachment(attach)
				if err != nil {
					log.Error("UpdateAttachment state(%s) failed:%s", attach.UUID, err.Error())
				}
			}
		} else if attach.Type == models.TypeCloudBrainTwo {
			attachjson, _ := json.Marshal(attach)
			labelmsg.SendDecompressAttachToLabelOBS(string(attachjson))
		}
	}

	return
}
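
// QueryAllPublicDataset lists all public attachments as CloudBrain datasets.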
func QueryAllPublicDataset(ctx *context.Context) {
	attachs, err := models.GetAllPublicAttachments()
	if err != nil {
		ctx.JSON(200, map[string]string{
			"result_code": "-1",
			"error_msg":   err.Error(),
			"data":        "",
		})
		return
	}

	queryDatasets(ctx, attachs)
}
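
// QueryPrivateDataset lists the private attachments of the given user as CloudBrain datasets.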
func QueryPrivateDataset(ctx *context.Context) {
	username := ctx.Params(":username")
	attachs, err := models.GetPrivateAttachments(username)
	if err != nil {
		ctx.JSON(200, map[string]string{
			"result_code": "-1",
			"error_msg":   err.Error(),
			"data":        "",
		})
		return
	}

	for _, attach := range attachs {
		attach.Name = username
	}

	queryDatasets(ctx, attachs)
}
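
// queryDatasets converts the attachments that exist in storage into CloudBrainDataset entries and writes them as a JSON response.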
func queryDatasets(ctx *context.Context, attachs []*models.AttachmentUsername) {
	var datasets []CloudBrainDataset
	if len(attachs) == 0 {
		log.Info("dataset is null")
		ctx.JSON(200, map[string]string{
			"result_code": "0",
			"error_msg":   "",
			"data":        "",
		})
		return
	}

	for _, attch := range attachs {
		has, err := storage.Attachments.HasObject(models.AttachmentRelativePath(attch.UUID))
		if err != nil || !has {
			continue
		}

		datasets = append(datasets, CloudBrainDataset{
			strconv.FormatInt(attch.ID, 10),
			attch.Attachment.Name,
			setting.Attachment.Minio.RealPath +
				setting.Attachment.Minio.Bucket + "/" +
				setting.Attachment.Minio.BasePath +
				models.AttachmentRelativePath(attch.UUID) +
				attch.UUID,
			attch.Name,
			attch.CreatedUnix.Format("2006-01-02 03:04:05 PM"),
		})
	}

	data, err := json.Marshal(datasets)
	if err != nil {
		log.Error("json.Marshal failed:", err.Error())
		ctx.JSON(200, map[string]string{
			"result_code": "-1",
			"error_msg":   err.Error(),
			"data":        "",
		})
		return
	}

	ctx.JSON(200, map[string]string{
		"result_code": "0",
		"error_msg":   "",
		"data":        string(data),
	})
	return
}
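
// checkTypeCloudBrain validates that the requested type is one of the known CloudBrain types.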
func checkTypeCloudBrain(typeCloudBrain int) error {
	if typeCloudBrain != models.TypeCloudBrainOne && typeCloudBrain != models.TypeCloudBrainTwo {
		log.Error("type error:", typeCloudBrain)
		return errors.New("type error")
	}
	return nil
}