You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

attachment.go 28 kB

4 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
3 years ago
3 years ago
3 years ago
3 years ago
4 years ago
3 years ago
3 years ago
5 years ago
4 years ago
4 years ago
4 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
3 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
4 years ago
3 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
5 years ago
4 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
3 years ago
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
4 years ago
4 years ago
4 years ago
3 years ago
4 years ago
4 years ago
3 years ago
4 years ago
3 years ago
4 years ago
4 years ago
4 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
4 years ago
5 years ago
4 years ago
4 years ago
3 years ago
4 years ago
5 years ago
4 years ago
4 years ago
4 years ago
5 years ago
3 years ago
3 years ago
4 years ago
3 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
3 years ago
4 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057
  1. // Copyright 2017 The Gitea Authors. All rights reserved.
  2. // Use of this source code is governed by a MIT-style
  3. // license that can be found in the LICENSE file.
  4. package repo
  5. import (
  6. contexExt "context"
  7. "encoding/json"
  8. "errors"
  9. "fmt"
  10. "mime/multipart"
  11. "net/http"
  12. "strconv"
  13. "strings"
  14. "code.gitea.io/gitea/modules/auth"
  15. "code.gitea.io/gitea/modules/base"
  16. "code.gitea.io/gitea/models"
  17. "code.gitea.io/gitea/modules/context"
  18. "code.gitea.io/gitea/modules/labelmsg"
  19. "code.gitea.io/gitea/modules/log"
  20. "code.gitea.io/gitea/modules/minio_ext"
  21. "code.gitea.io/gitea/modules/notification"
  22. "code.gitea.io/gitea/modules/setting"
  23. "code.gitea.io/gitea/modules/storage"
  24. "code.gitea.io/gitea/modules/upload"
  25. "code.gitea.io/gitea/modules/worker"
  26. gouuid "github.com/satori/go.uuid"
  27. )
const (
	// Result codes reported by the decompress worker via the
	// UpdateAttachmentDecompressState callback ("result" query parameter).
	DecompressSuccess = "0"
	DecompressFailed  = "1"

	// Template paths for the attachment upload and edit pages.
	tplAttachmentUpload base.TplName = "repo/attachment/upload"
	tplAttachmentEdit   base.TplName = "repo/attachment/edit"
)
// CloudBrainDataset is the JSON shape of a dataset attachment exposed to the
// CloudBrain service. Note the JSON keys differ from the Go field names
// (e.g. UUID is serialized as "id", Path as "place").
type CloudBrainDataset struct {
	UUID       string `json:"id"`
	Name       string `json:"name"`
	Path       string `json:"place"`
	UserName   string `json:"provider"`
	CreateTime string `json:"created_at"`
}
// UploadForm carries one chunk of a resumable multipart upload.
// Form field names ("uploadId", "chunkNumber", ...) match the chunked-upload
// JavaScript client.
type UploadForm struct {
	UploadID   string         `form:"uploadId"`
	UuID       string         `form:"uuid"`
	PartSize   int64          `form:"size"`
	Offset     int64          `form:"offset"`
	PartNumber int            `form:"chunkNumber"`
	PartFile   multipart.File `form:"file"`
}
// RenderAttachmentSettings is the exported entry point that copies the
// attachment configuration into the template context; it simply delegates
// to the unexported helper.
func RenderAttachmentSettings(ctx *context.Context) {
	renderAttachmentSettings(ctx)
}
  53. func renderAttachmentSettings(ctx *context.Context) {
  54. ctx.Data["IsAttachmentEnabled"] = setting.Attachment.Enabled
  55. ctx.Data["AttachmentStoreType"] = setting.Attachment.StoreType
  56. ctx.Data["AttachmentAllowedTypes"] = setting.Attachment.AllowedTypes
  57. ctx.Data["AttachmentMaxSize"] = setting.Attachment.MaxSize
  58. ctx.Data["AttachmentMaxFiles"] = setting.Attachment.MaxFiles
  59. }
  60. func UploadAttachmentUI(ctx *context.Context) {
  61. ctx.Data["datasetId"] = ctx.Query("datasetId")
  62. ctx.Data["PageIsDataset"] = true
  63. ctx.HTML(200, tplAttachmentUpload)
  64. }
  65. func EditAttachmentUI(ctx *context.Context) {
  66. id, _ := strconv.ParseInt(ctx.Params(":id"), 10, 64)
  67. ctx.Data["PageIsDataset"] = true
  68. attachment, _ := models.GetAttachmentByID(id)
  69. if attachment == nil {
  70. ctx.Error(404, "The attachment does not exits.")
  71. }
  72. ctx.Data["Attachment"] = attachment
  73. ctx.HTML(200, tplAttachmentEdit)
  74. }
  75. func EditAttachment(ctx *context.Context, form auth.EditAttachmentForm) {
  76. err := models.UpdateAttachmentDescription(&models.Attachment{
  77. ID: form.ID,
  78. Description: form.Description,
  79. })
  80. if err != nil {
  81. ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("dataset.edit_attachment_fail")))
  82. }
  83. ctx.JSON(http.StatusOK, models.BaseOKMessage)
  84. }
// UploadAttachment response for uploading issue's attachment
// It sniffs the first KiB of the file to verify the content type against the
// configured allow-list before persisting a new attachment record.
func UploadAttachment(ctx *context.Context) {
	if !setting.Attachment.Enabled {
		ctx.Error(404, "attachment is not enabled")
		return
	}
	file, header, err := ctx.Req.FormFile("file")
	if err != nil {
		ctx.Error(500, fmt.Sprintf("FormFile: %v", err))
		return
	}
	defer file.Close()
	// Read up to 1 KiB for content-type sniffing; the read error is
	// deliberately ignored (a short/empty file just yields n == 0).
	buf := make([]byte, 1024)
	n, _ := file.Read(buf)
	if n > 0 {
		buf = buf[:n]
	}
	err = upload.VerifyAllowedContentType(buf, strings.Split(setting.Attachment.AllowedTypes, ","))
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}
	// A missing/invalid dataset_id parses to 0, i.e. no dataset link.
	datasetID, _ := strconv.ParseInt(ctx.Req.FormValue("dataset_id"), 10, 64)
	// NewAttachment receives both the sniffed prefix and the remaining
	// stream so the full file content is stored.
	attach, err := models.NewAttachment(&models.Attachment{
		IsPrivate:  true,
		UploaderID: ctx.User.ID,
		Name:       header.Filename,
		DatasetID:  datasetID,
	}, buf, file)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("NewAttachment: %v", err))
		return
	}
	log.Trace("New attachment uploaded: %s", attach.UUID)
	ctx.JSON(200, map[string]string{
		"uuid": attach.UUID,
	})
}
  123. func UpdatePublicAttachment(ctx *context.Context) {
  124. file := ctx.Query("file")
  125. isPrivate, _ := strconv.ParseBool(ctx.Query("is_private"))
  126. attach, err := models.GetAttachmentByUUID(file)
  127. if err != nil {
  128. ctx.Error(404, err.Error())
  129. return
  130. }
  131. attach.IsPrivate = isPrivate
  132. models.UpdateAttachment(attach)
  133. }
// DeleteAttachment response for deleting issue's attachment
// Removes the attachment record, notifies the label system, and cleans up
// decompressed files and the chunk-upload bookkeeping for the same UUID.
func DeleteAttachment(ctx *context.Context) {
	file := ctx.Query("file")
	attach, err := models.GetAttachmentByUUID(file)
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}
	//issue 214: mod del-dataset permission
	if !models.CanDelAttachment(ctx.IsSigned, ctx.User, attach) {
		ctx.Error(403)
		return
	}
	err = models.DeleteAttachment(attach, true)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("DeleteAttachment: %v", err))
		return
	}
	// Best-effort notification; the marshal error is deliberately ignored
	// (the attachment struct always serializes).
	attachjson, _ := json.Marshal(attach)
	labelmsg.SendDeleteAttachToLabelSys(string(attachjson))
	// Remove any files produced by a previous decompress of this archive.
	DeleteAllUnzipFile(attach, "")
	_, err = models.DeleteFileChunkById(attach.UUID)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("DeleteFileChunkById: %v", err))
		return
	}
	ctx.JSON(200, map[string]string{
		"uuid": attach.UUID,
	})
}
  164. func DownloadUserIsOrgOrCollaboration(ctx *context.Context, attach *models.Attachment) bool {
  165. dataset, err := models.GetDatasetByID(attach.DatasetID)
  166. if err != nil {
  167. log.Info("query dataset error")
  168. } else {
  169. repo, err := models.GetRepositoryByID(dataset.RepoID)
  170. if err != nil {
  171. log.Info("query repo error.")
  172. } else {
  173. repo.GetOwner()
  174. if ctx.User != nil {
  175. if repo.Owner.IsOrganization() {
  176. if repo.Owner.IsUserPartOfOrg(ctx.User.ID) {
  177. log.Info("org user may visit the attach.")
  178. return true
  179. }
  180. }
  181. isCollaborator, _ := repo.IsCollaborator(ctx.User.ID)
  182. if isCollaborator {
  183. log.Info("Collaborator user may visit the attach.")
  184. return true
  185. }
  186. }
  187. }
  188. }
  189. return false
  190. }
// GetAttachment serve attachements
// Authorization flow: resolve the attachment's linked repository/dataset,
// check repo or dataset permissions, then either redirect the client to a
// presigned store URL (Minio/OBS) or stream the file directly.
func GetAttachment(ctx *context.Context) {
	typeCloudBrain := ctx.QueryInt("type")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}
	attach, err := models.GetAttachmentByUUID(ctx.Params(":uuid"))
	if err != nil {
		if models.IsErrAttachmentNotExist(err) {
			ctx.Error(404)
		} else {
			ctx.ServerError("GetAttachmentByUUID", err)
		}
		return
	}
	repository, unitType, err := attach.LinkedRepository()
	if err != nil {
		ctx.ServerError("LinkedRepository", err)
		return
	}
	dataSet, err := attach.LinkedDataSet()
	if err != nil {
		ctx.ServerError("LinkedDataSet", err)
		return
	}
	// A dataset attachment is checked against its dataset's repository;
	// the repo lookup error is deliberately ignored (nil falls through to
	// the uploader/privacy check below).
	if repository == nil && dataSet != nil {
		repository, _ = models.GetRepositoryByID(dataSet.RepoID)
		unitType = models.UnitTypeDatasets
	}
	if repository == nil { //If not linked
		//if !(ctx.IsSigned && attach.UploaderID == ctx.User.ID) && attach.IsPrivate { //We block if not the uploader
		//log.Info("ctx.IsSigned =" + fmt.Sprintf("%v", ctx.IsSigned))
		if !(ctx.IsSigned && attach.UploaderID == ctx.User.ID) && attach.IsPrivate && !DownloadUserIsOrgOrCollaboration(ctx, attach) { //We block if not the uploader
			ctx.Error(http.StatusNotFound)
			return
		}
	} else { //If we have the repository we check access
		perm, errPermission := models.GetUserRepoPermission(repository, ctx.User)
		if errPermission != nil {
			ctx.Error(http.StatusInternalServerError, "GetUserRepoPermission", errPermission.Error())
			return
		}
		if !perm.CanRead(unitType) {
			ctx.Error(http.StatusNotFound)
			return
		}
	}
	// Dataset downloads additionally require a signed-in user with dataset
	// permission; anonymous users are sent to the login page.
	if dataSet != nil {
		if !ctx.IsSigned {
			ctx.SetCookie("redirect_to", setting.AppSubURL+ctx.Req.URL.RequestURI(), 0, setting.AppSubURL)
			ctx.Redirect(setting.AppSubURL + "/user/login")
			return
		} else {
			isPermit, err := models.GetUserDataSetPermission(dataSet, ctx.User)
			if err != nil {
				ctx.Error(http.StatusInternalServerError, "GetUserDataSetPermission", err.Error())
				return
			}
			if !isPermit {
				ctx.Error(http.StatusNotFound)
				return
			}
		}
	}
	//If we have matched and access to release or issue
	if setting.Attachment.StoreType == storage.MinioStorageType {
		url := ""
		if typeCloudBrain == models.TypeCloudBrainOne {
			// CloudBrainOne objects live in Minio; build a presigned GET URL.
			url, err = storage.Attachments.PresignedGetURL(setting.Attachment.Minio.BasePath+attach.RelativePath(), attach.Name)
			if err != nil {
				ctx.ServerError("PresignedGetURL", err)
				return
			}
		} else {
			// CloudBrainTwo objects live in OBS; go through the proxy when
			// one is configured, otherwise presign directly.
			if setting.PROXYURL != "" {
				url = setting.PROXYURL + "/obs_proxy_download?uuid=" + attach.UUID + "&file_name=" + attach.Name
				log.Info("return url=" + url)
			} else {
				url, err = storage.ObsGetPreSignedUrl(attach.UUID, attach.Name)
				if err != nil {
					ctx.ServerError("ObsGetPreSignedUrl", err)
					return
				}
			}
		}
		if err = increaseDownloadCount(attach, dataSet); err != nil {
			ctx.ServerError("Update", err)
			return
		}
		// Dataset attachments redirect to the store; others are streamed.
		if dataSet != nil {
			http.Redirect(ctx.Resp, ctx.Req.Request, url, http.StatusMovedPermanently)
		} else {
			fr, err := storage.Attachments.Open(attach.RelativePath())
			if err != nil {
				ctx.ServerError("Open", err)
				return
			}
			defer fr.Close()
			if err = ServeData(ctx, attach.Name, fr); err != nil {
				ctx.ServerError("ServeData", err)
				return
			}
		}
	} else {
		// Local (non-Minio) storage: stream the file from disk.
		fr, err := storage.Attachments.Open(attach.RelativePath())
		if err != nil {
			ctx.ServerError("Open", err)
			return
		}
		defer fr.Close()
		if err = increaseDownloadCount(attach, dataSet); err != nil {
			ctx.ServerError("Update", err)
			return
		}
		if err = ServeData(ctx, attach.Name, fr); err != nil {
			ctx.ServerError("ServeData", err)
			return
		}
	}
}
  313. func increaseDownloadCount(attach *models.Attachment, dataSet *models.Dataset) error {
  314. if err := attach.IncreaseDownloadCount(); err != nil {
  315. return err
  316. }
  317. if dataSet != nil {
  318. if err := models.IncreaseDownloadCount(dataSet.ID); err != nil {
  319. return err
  320. }
  321. }
  322. return nil
  323. }
  324. // Get a presigned url for put object
  325. func GetPresignedPutObjectURL(ctx *context.Context) {
  326. if !setting.Attachment.Enabled {
  327. ctx.Error(404, "attachment is not enabled")
  328. return
  329. }
  330. err := upload.VerifyFileType(ctx.Params("file_type"), strings.Split(setting.Attachment.AllowedTypes, ","))
  331. if err != nil {
  332. ctx.Error(400, err.Error())
  333. return
  334. }
  335. if setting.Attachment.StoreType == storage.MinioStorageType {
  336. uuid := gouuid.NewV4().String()
  337. url, err := storage.Attachments.PresignedPutURL(models.AttachmentRelativePath(uuid))
  338. if err != nil {
  339. ctx.ServerError("PresignedPutURL", err)
  340. return
  341. }
  342. ctx.JSON(200, map[string]string{
  343. "uuid": uuid,
  344. "url": url,
  345. })
  346. } else {
  347. ctx.Error(404, "storage type is not enabled")
  348. return
  349. }
  350. }
// AddAttachment response for add attachment record
// Verifies the object was actually uploaded to the store (Minio or OBS),
// inserts the attachment record, and, for decompressible archives on
// CloudBrainOne, enqueues a decompress task.
func AddAttachment(ctx *context.Context) {
	typeCloudBrain := ctx.QueryInt("type")
	fileName := ctx.Query("file_name")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}
	uuid := ctx.Query("uuid")
	has := false
	if typeCloudBrain == models.TypeCloudBrainOne {
		has, err = storage.Attachments.HasObject(models.AttachmentRelativePath(uuid))
		if err != nil {
			ctx.ServerError("HasObject", err)
			return
		}
	} else {
		// OBS keys include the original file name under the uuid prefix.
		has, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(uuid) + "/" + fileName)
		if err != nil {
			ctx.ServerError("ObsHasObject", err)
			return
		}
	}
	if !has {
		ctx.Error(404, "attachment has not been uploaded")
		return
	}
	datasetId := ctx.QueryInt64("dataset_id")
	dataset, err := models.GetDatasetByID(datasetId)
	if err != nil {
		ctx.Error(404, "dataset does not exist.")
		return
	}
	// The attachment inherits the dataset's privacy.
	attachment, err := models.InsertAttachment(&models.Attachment{
		UUID:       uuid,
		UploaderID: ctx.User.ID,
		IsPrivate:  dataset.IsPrivate(),
		Name:       fileName,
		Size:       ctx.QueryInt64("size"),
		DatasetID:  ctx.QueryInt64("dataset_id"),
		Type:       typeCloudBrain,
	})
	if err != nil {
		ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
		return
	}
	if attachment.DatasetID != 0 {
		if isCanDecompress(attachment.Name) {
			if typeCloudBrain == models.TypeCloudBrainOne {
				// Decompress failures are logged only; the upload itself
				// already succeeded so the request still returns 200.
				err = worker.SendDecompressTask(contexExt.Background(), uuid, attachment.Name)
				if err != nil {
					log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
				} else {
					attachment.DecompressState = models.DecompressStateIng
					err = models.UpdateAttachment(attachment)
					if err != nil {
						log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error())
					}
				}
			}
			//todo:decompress type_two
		}
	}
	ctx.JSON(200, map[string]string{
		"result_code": "0",
	})
}
  419. func isCanDecompress(name string) bool {
  420. if strings.HasSuffix(name, ".zip") || strings.HasSuffix(name, ".tar.gz") || strings.HasSuffix(name, ".tgz") {
  421. return true
  422. }
  423. return false
  424. }
  425. func UpdateAttachmentDecompressState(ctx *context.Context) {
  426. uuid := ctx.Query("uuid")
  427. result := ctx.Query("result")
  428. attach, err := models.GetAttachmentByUUID(uuid)
  429. if err != nil {
  430. log.Error("GetAttachmentByUUID(%s) failed:%s", uuid, err.Error())
  431. return
  432. }
  433. if result == DecompressSuccess {
  434. attach.DecompressState = models.DecompressStateDone
  435. } else if result == DecompressFailed {
  436. attach.DecompressState = models.DecompressStateFailed
  437. } else {
  438. log.Error("result is error:", result)
  439. return
  440. }
  441. err = models.UpdateAttachment(attach)
  442. if err != nil {
  443. log.Error("UpdateAttachment(%s) failed:%s", uuid, err.Error())
  444. return
  445. }
  446. log.Info("start to send msg to labelsystem ")
  447. dataset, _ := models.GetDatasetByID(attach.DatasetID)
  448. var labelMap map[string]string
  449. labelMap = make(map[string]string)
  450. labelMap["UUID"] = uuid
  451. labelMap["Type"] = fmt.Sprint(attach.Type)
  452. labelMap["UploaderID"] = fmt.Sprint(attach.UploaderID)
  453. labelMap["RepoID"] = fmt.Sprint(dataset.RepoID)
  454. labelMap["AttachName"] = attach.Name
  455. attachjson, _ := json.Marshal(labelMap)
  456. labelmsg.SendAddAttachToLabelSys(string(attachjson))
  457. log.Info("end to send msg to labelsystem ")
  458. ctx.JSON(200, map[string]string{
  459. "result_code": "0",
  460. })
  461. }
// GetSuccessChunks answers a resumable-upload probe: given a file MD5, it
// reconciles the chunk-upload record with what actually exists in the store
// and returns the uuid/uploadID/chunk list the client needs to resume (or
// empty strings to signal "start over").
func GetSuccessChunks(ctx *context.Context) {
	fileMD5 := ctx.Query("md5")
	typeCloudBrain := ctx.QueryInt("type")
	fileName := ctx.Query("file_name")
	var chunks string
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}
	fileChunk, err := models.GetFileChunkByMD5AndUser(fileMD5, ctx.User.ID, typeCloudBrain)
	if err != nil {
		// No record for this MD5: tell the client to start a fresh upload.
		if models.IsErrFileChunkNotExist(err) {
			ctx.JSON(200, map[string]string{
				"uuid":     "",
				"uploaded": "0",
				"uploadID": "",
				"chunks":   "",
			})
		} else {
			ctx.ServerError("GetFileChunkByMD5", err)
		}
		return
	}
	// Check whether the final object already exists in the store.
	isExist := false
	if typeCloudBrain == models.TypeCloudBrainOne {
		isExist, err = storage.Attachments.HasObject(models.AttachmentRelativePath(fileChunk.UUID))
		if err != nil {
			ctx.ServerError("HasObject failed", err)
			return
		}
	} else {
		// OBS keys embed the file name; prefer the name recorded on an
		// existing attachment over the one the client sent.
		oldFileName := fileName
		oldAttachment, _ := models.GetAttachmentByUUID(fileChunk.UUID)
		if oldAttachment != nil {
			oldFileName = oldAttachment.Name
		}
		isExist, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(fileChunk.UUID) + "/" + oldFileName)
		if err != nil {
			ctx.ServerError("ObsHasObject failed", err)
			return
		}
	}
	if isExist {
		// Store has the object but the record says otherwise: repair it.
		if fileChunk.IsUploaded == models.FileNotUploaded {
			log.Info("the file has been uploaded but not recorded")
			fileChunk.IsUploaded = models.FileUploaded
			if err = models.UpdateFileChunk(fileChunk); err != nil {
				log.Error("UpdateFileChunk failed:", err.Error())
			}
		}
	} else {
		// Record says uploaded but the object is missing: repair the flag,
		// then ask the store which parts were actually received.
		if fileChunk.IsUploaded == models.FileUploaded {
			log.Info("the file has been recorded but not uploaded")
			fileChunk.IsUploaded = models.FileNotUploaded
			if err = models.UpdateFileChunk(fileChunk); err != nil {
				log.Error("UpdateFileChunk failed:", err.Error())
			}
		}
		if typeCloudBrain == models.TypeCloudBrainOne {
			chunks, err = storage.GetPartInfos(fileChunk.UUID, fileChunk.UploadID)
			if err != nil {
				log.Error("GetPartInfos failed:%v", err.Error())
			}
		} else {
			chunks, err = storage.GetObsPartInfos(fileChunk.UUID, fileChunk.UploadID, fileName)
			if err != nil {
				log.Error("GetObsPartInfos failed:%v", err.Error())
			}
		}
		// Part listing failed (e.g. the multipart upload expired): drop the
		// stale record and tell the client to start over.
		if err != nil {
			models.DeleteFileChunk(fileChunk)
			ctx.JSON(200, map[string]string{
				"uuid":     "",
				"uploaded": "0",
				"uploadID": "",
				"chunks":   "",
			})
			return
		}
	}
	var attachID int64
	attach, err := models.GetAttachmentByUUID(fileChunk.UUID)
	if err != nil {
		if models.IsErrAttachmentNotExist(err) {
			attachID = 0
		} else {
			ctx.ServerError("GetAttachmentByUUID", err)
			return
		}
	} else {
		attachID = attach.ID
	}
	// Upload exists but no attachment record yet: return empty metadata.
	if attach == nil {
		ctx.JSON(200, map[string]string{
			"uuid":        fileChunk.UUID,
			"uploaded":    strconv.Itoa(fileChunk.IsUploaded),
			"uploadID":    fileChunk.UploadID,
			"chunks":      string(chunks),
			"attachID":    "0",
			"datasetID":   "0",
			"fileName":    "",
			"datasetName": "",
		})
		return
	}
	dataset, err := models.GetDatasetByID(attach.DatasetID)
	if err != nil {
		ctx.ServerError("GetDatasetByID", err)
		return
	}
	ctx.JSON(200, map[string]string{
		"uuid":        fileChunk.UUID,
		"uploaded":    strconv.Itoa(fileChunk.IsUploaded),
		"uploadID":    fileChunk.UploadID,
		"chunks":      string(chunks),
		"attachID":    strconv.Itoa(int(attachID)),
		"datasetID":   strconv.Itoa(int(attach.DatasetID)),
		"fileName":    attach.Name,
		"datasetName": dataset.Title,
	})
}
// NewMultipart starts a chunked upload: it validates the file type, chunk
// count and size, creates a multipart upload in Minio or OBS depending on
// the cloudbrain type, records a FileChunk row, and returns the uuid and
// uploadID the client must use for each part.
func NewMultipart(ctx *context.Context) {
	if !setting.Attachment.Enabled {
		ctx.Error(404, "attachment is not enabled")
		return
	}
	err := upload.VerifyFileType(ctx.Query("fileType"), strings.Split(setting.Attachment.AllowedTypes, ","))
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}
	typeCloudBrain := ctx.QueryInt("type")
	err = checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}
	fileName := ctx.Query("file_name")
	if setting.Attachment.StoreType == storage.MinioStorageType {
		// Enforce the store's multipart limits before creating anything.
		totalChunkCounts := ctx.QueryInt("totalChunkCounts")
		if totalChunkCounts > minio_ext.MaxPartsCount {
			ctx.Error(400, fmt.Sprintf("chunk counts(%d) is too much", totalChunkCounts))
			return
		}
		fileSize := ctx.QueryInt64("size")
		if fileSize > minio_ext.MaxMultipartPutObjectSize {
			ctx.Error(400, fmt.Sprintf("file size(%d) is too big", fileSize))
			return
		}
		uuid := gouuid.NewV4().String()
		var uploadID string
		if typeCloudBrain == models.TypeCloudBrainOne {
			uploadID, err = storage.NewMultiPartUpload(uuid)
			if err != nil {
				ctx.ServerError("NewMultipart", err)
				return
			}
		} else {
			uploadID, err = storage.NewObsMultiPartUpload(uuid, fileName)
			if err != nil {
				ctx.ServerError("NewObsMultiPartUpload", err)
				return
			}
		}
		// Record the upload so GetSuccessChunks can resume it later.
		_, err = models.InsertFileChunk(&models.FileChunk{
			UUID:        uuid,
			UserID:      ctx.User.ID,
			UploadID:    uploadID,
			Md5:         ctx.Query("md5"),
			Size:        fileSize,
			TotalChunks: totalChunkCounts,
			Type:        typeCloudBrain,
		})
		if err != nil {
			ctx.Error(500, fmt.Sprintf("InsertFileChunk: %v", err))
			return
		}
		ctx.JSON(200, map[string]string{
			"uuid":     uuid,
			"uploadID": uploadID,
		})
	} else {
		ctx.Error(404, "storage type is not enabled")
		return
	}
}
  649. func PutOBSProxyUpload(ctx *context.Context) {
  650. uuid := ctx.Query("uuid")
  651. uploadID := ctx.Query("uploadId")
  652. partNumber := ctx.QueryInt("partNumber")
  653. fileName := ctx.Query("file_name")
  654. RequestBody := ctx.Req.Body()
  655. if RequestBody == nil {
  656. ctx.Error(500, fmt.Sprintf("FormFile: %v", RequestBody))
  657. return
  658. }
  659. err := storage.ObsMultiPartUpload(uuid, uploadID, partNumber, fileName, RequestBody.ReadCloser())
  660. if err != nil {
  661. log.Info("upload error.")
  662. }
  663. }
  664. func GetOBSProxyDownload(ctx *context.Context) {
  665. uuid := ctx.Query("uuid")
  666. fileName := ctx.Query("file_name")
  667. body, err := storage.ObsDownload(uuid, fileName)
  668. if err != nil {
  669. log.Info("upload error.")
  670. } else {
  671. defer body.Close()
  672. ctx.Resp.Header().Set("Content-Disposition", "attachment; filename="+fileName)
  673. ctx.Resp.Header().Set("Content-Type", "application/octet-stream")
  674. p := make([]byte, 1024)
  675. var readErr error
  676. var readCount int
  677. // 读取对象内容
  678. for {
  679. readCount, readErr = body.Read(p)
  680. if readCount > 0 {
  681. ctx.Resp.Write(p[:readCount])
  682. //fmt.Printf("%s", p[:readCount])
  683. }
  684. if readErr != nil {
  685. break
  686. }
  687. }
  688. }
  689. }
// GetMultipartUploadUrl returns the URL the client must PUT one chunk to:
// a presigned Minio URL for CloudBrainOne, or either the OBS proxy endpoint
// or a presigned OBS URL for CloudBrainTwo.
func GetMultipartUploadUrl(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	uploadID := ctx.Query("uploadID")
	partNumber := ctx.QueryInt("chunkNumber")
	size := ctx.QueryInt64("size")
	fileName := ctx.Query("file_name")
	typeCloudBrain := ctx.QueryInt("type")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}
	url := ""
	if typeCloudBrain == models.TypeCloudBrainOne {
		// NOTE(review): this rejects sizes above MinPartSize but the message
		// says "too big" — comparing against a *Min* constant looks inverted;
		// confirm against minio_ext's intended part-size limits.
		if size > minio_ext.MinPartSize {
			ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size))
			return
		}
		url, err = storage.GenMultiPartSignedUrl(uuid, uploadID, partNumber, size)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err))
			return
		}
	} else {
		// Prefer the proxy endpoint when configured (client can't reach OBS).
		if setting.PROXYURL != "" {
			url = setting.PROXYURL + "/obs_proxy_multipart?uuid=" + uuid + "&uploadId=" + uploadID + "&partNumber=" + fmt.Sprint(partNumber) + "&file_name=" + fileName
			log.Info("return url=" + url)
		} else {
			url, err = storage.ObsGenMultiPartSignedUrl(uuid, uploadID, partNumber, fileName)
			if err != nil {
				ctx.Error(500, fmt.Sprintf("ObsGenMultiPartSignedUrl failed: %v", err))
				return
			}
			log.Info("url=" + url)
		}
	}
	ctx.JSON(200, map[string]string{
		"url": url,
	})
}
  730. func CompleteMultipart(ctx *context.Context) {
  731. uuid := ctx.Query("uuid")
  732. uploadID := ctx.Query("uploadID")
  733. typeCloudBrain := ctx.QueryInt("type")
  734. fileName := ctx.Query("file_name")
  735. log.Warn("uuid:" + uuid)
  736. log.Warn("typeCloudBrain:" + strconv.Itoa(typeCloudBrain))
  737. err := checkTypeCloudBrain(typeCloudBrain)
  738. if err != nil {
  739. ctx.ServerError("checkTypeCloudBrain failed", err)
  740. return
  741. }
  742. fileChunk, err := models.GetFileChunkByUUID(uuid)
  743. if err != nil {
  744. if models.IsErrFileChunkNotExist(err) {
  745. ctx.Error(404)
  746. } else {
  747. ctx.ServerError("GetFileChunkByUUID", err)
  748. }
  749. return
  750. }
  751. if typeCloudBrain == models.TypeCloudBrainOne {
  752. _, err = storage.CompleteMultiPartUpload(uuid, uploadID, fileChunk.TotalChunks)
  753. if err != nil {
  754. ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err))
  755. return
  756. }
  757. } else {
  758. err = storage.CompleteObsMultiPartUpload(uuid, uploadID, fileName, fileChunk.TotalChunks)
  759. if err != nil {
  760. ctx.Error(500, fmt.Sprintf("CompleteObsMultiPartUpload failed: %v", err))
  761. return
  762. }
  763. }
  764. fileChunk.IsUploaded = models.FileUploaded
  765. err = models.UpdateFileChunk(fileChunk)
  766. if err != nil {
  767. ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
  768. return
  769. }
  770. dataset, _ := models.GetDatasetByID(ctx.QueryInt64("dataset_id"))
  771. log.Warn("insert attachment to datasetId:" + strconv.FormatInt(dataset.ID, 10))
  772. attachment, err := models.InsertAttachment(&models.Attachment{
  773. UUID: uuid,
  774. UploaderID: ctx.User.ID,
  775. IsPrivate: dataset.IsPrivate(),
  776. Name: fileName,
  777. Size: ctx.QueryInt64("size"),
  778. DatasetID: ctx.QueryInt64("dataset_id"),
  779. Description: ctx.Query("description"),
  780. Type: typeCloudBrain,
  781. })
  782. if err != nil {
  783. ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
  784. return
  785. }
  786. attachment.UpdateDatasetUpdateUnix()
  787. repository, _ := models.GetRepositoryByID(dataset.RepoID)
  788. notification.NotifyOtherTask(ctx.User, repository, fmt.Sprint(repository.IsPrivate, attachment.IsPrivate), attachment.Name, models.ActionUploadAttachment)
  789. if attachment.DatasetID != 0 {
  790. if isCanDecompress(attachment.Name) {
  791. if typeCloudBrain == models.TypeCloudBrainOne {
  792. err = worker.SendDecompressTask(contexExt.Background(), uuid, attachment.Name)
  793. if err != nil {
  794. log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
  795. } else {
  796. updateAttachmentDecompressStateIng(attachment)
  797. }
  798. }
  799. if typeCloudBrain == models.TypeCloudBrainTwo {
  800. attachjson, _ := json.Marshal(attachment)
  801. err = labelmsg.SendDecompressAttachToLabelOBS(string(attachjson))
  802. if err != nil {
  803. log.Error("SendDecompressTask to labelsystem (%s) failed:%s", attachment.UUID, err.Error())
  804. } else {
  805. updateAttachmentDecompressStateIng(attachment)
  806. }
  807. }
  808. } else {
  809. var labelMap map[string]string
  810. labelMap = make(map[string]string)
  811. labelMap["UUID"] = uuid
  812. labelMap["Type"] = fmt.Sprint(attachment.Type)
  813. labelMap["UploaderID"] = fmt.Sprint(attachment.UploaderID)
  814. labelMap["RepoID"] = fmt.Sprint(dataset.RepoID)
  815. labelMap["AttachName"] = attachment.Name
  816. attachjson, _ := json.Marshal(labelMap)
  817. labelmsg.SendAddAttachToLabelSys(string(attachjson))
  818. }
  819. }
  820. ctx.JSON(200, map[string]string{
  821. "result_code": "0",
  822. })
  823. }
  824. func HandleUnDecompressAttachment() {
  825. attachs, err := models.GetUnDecompressAttachments()
  826. if err != nil {
  827. log.Error("GetUnDecompressAttachments failed:", err.Error())
  828. return
  829. }
  830. for _, attach := range attachs {
  831. if attach.Type == models.TypeCloudBrainOne {
  832. err = worker.SendDecompressTask(contexExt.Background(), attach.UUID, attach.Name)
  833. if err != nil {
  834. log.Error("SendDecompressTask(%s) failed:%s", attach.UUID, err.Error())
  835. } else {
  836. updateAttachmentDecompressStateIng(attach)
  837. }
  838. } else if attach.Type == models.TypeCloudBrainTwo {
  839. attachjson, _ := json.Marshal(attach)
  840. err = labelmsg.SendDecompressAttachToLabelOBS(string(attachjson))
  841. if err != nil {
  842. log.Error("SendDecompressTask to labelsystem (%s) failed:%s", attach.UUID, err.Error())
  843. } else {
  844. updateAttachmentDecompressStateIng(attach)
  845. }
  846. }
  847. }
  848. return
  849. }
  850. func updateAttachmentDecompressStateIng(attach *models.Attachment) {
  851. attach.DecompressState = models.DecompressStateIng
  852. err := models.UpdateAttachment(attach)
  853. if err != nil {
  854. log.Error("UpdateAttachment state(%s) failed:%s", attach.UUID, err.Error())
  855. }
  856. }
  857. func QueryAllPublicDataset(ctx *context.Context) {
  858. attachs, err := models.GetAllPublicAttachments()
  859. if err != nil {
  860. ctx.JSON(200, map[string]string{
  861. "result_code": "-1",
  862. "error_msg": err.Error(),
  863. "data": "",
  864. })
  865. return
  866. }
  867. queryDatasets(ctx, attachs)
  868. }
  869. func QueryPrivateDataset(ctx *context.Context) {
  870. username := ctx.Params(":username")
  871. attachs, err := models.GetPrivateAttachments(username)
  872. if err != nil {
  873. ctx.JSON(200, map[string]string{
  874. "result_code": "-1",
  875. "error_msg": err.Error(),
  876. "data": "",
  877. })
  878. return
  879. }
  880. for _, attach := range attachs {
  881. attach.Name = username
  882. }
  883. queryDatasets(ctx, attachs)
  884. }
  885. func queryDatasets(ctx *context.Context, attachs []*models.AttachmentUsername) {
  886. var datasets []CloudBrainDataset
  887. if len(attachs) == 0 {
  888. log.Info("dataset is null")
  889. ctx.JSON(200, map[string]string{
  890. "result_code": "0",
  891. "error_msg": "",
  892. "data": "",
  893. })
  894. return
  895. }
  896. for _, attch := range attachs {
  897. has, err := storage.Attachments.HasObject(models.AttachmentRelativePath(attch.UUID))
  898. if err != nil || !has {
  899. continue
  900. }
  901. datasets = append(datasets, CloudBrainDataset{strconv.FormatInt(attch.ID, 10),
  902. attch.Attachment.Name,
  903. setting.Attachment.Minio.RealPath +
  904. setting.Attachment.Minio.Bucket + "/" +
  905. setting.Attachment.Minio.BasePath +
  906. models.AttachmentRelativePath(attch.UUID) +
  907. attch.UUID,
  908. attch.Name,
  909. attch.CreatedUnix.Format("2006-01-02 03:04:05 PM")})
  910. }
  911. data, err := json.Marshal(datasets)
  912. if err != nil {
  913. log.Error("json.Marshal failed:", err.Error())
  914. ctx.JSON(200, map[string]string{
  915. "result_code": "-1",
  916. "error_msg": err.Error(),
  917. "data": "",
  918. })
  919. return
  920. }
  921. ctx.JSON(200, map[string]string{
  922. "result_code": "0",
  923. "error_msg": "",
  924. "data": string(data),
  925. })
  926. return
  927. }
  928. func checkTypeCloudBrain(typeCloudBrain int) error {
  929. if typeCloudBrain != models.TypeCloudBrainOne && typeCloudBrain != models.TypeCloudBrainTwo {
  930. log.Error("type error:", typeCloudBrain)
  931. return errors.New("type error")
  932. }
  933. return nil
  934. }