You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

attachment.go 27 kB

4 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
5 years ago
4 years ago
4 years ago
4 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
3 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
4 years ago
3 years ago
4 years ago
5 years ago
5 years ago
5 years ago
3 years ago
5 years ago
4 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
3 years ago
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
3 years ago
4 years ago
4 years ago
4 years ago
3 years ago
4 years ago
3 years ago
4 years ago
3 years ago
4 years ago
4 years ago
4 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
4 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
5 years ago
4 years ago
4 years ago
5 years ago
4 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
3 years ago
4 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032
  1. // Copyright 2017 The Gitea Authors. All rights reserved.
  2. // Use of this source code is governed by a MIT-style
  3. // license that can be found in the LICENSE file.
  4. package repo
import (
	contexExt "context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"mime/multipart"
	"net/http"
	"path"
	"strconv"
	"strings"

	"code.gitea.io/gitea/models"
	"code.gitea.io/gitea/modules/context"
	"code.gitea.io/gitea/modules/labelmsg"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/minio_ext"
	"code.gitea.io/gitea/modules/notification"
	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/storage"
	"code.gitea.io/gitea/modules/upload"
	"code.gitea.io/gitea/modules/worker"

	gouuid "github.com/satori/go.uuid"
)
const (
	// Result codes reported back by the decompress service
	// (see UpdateAttachmentDecompressState).
	DecompressSuccess = "0" // decompression finished successfully
	DecompressFailed  = "1" // decompression failed
)
// CloudBrainDataset is the JSON shape in which an attachment is exposed to
// the cloud-brain service as a dataset entry. The JSON field names ("place",
// "provider", ...) are dictated by that external API.
type CloudBrainDataset struct {
	UUID       string `json:"id"`
	Name       string `json:"name"`
	Path       string `json:"place"`
	UserName   string `json:"provider"`
	CreateTime string `json:"created_at"`
}
// UploadForm binds one chunk of a resumable multipart upload. The form field
// names ("uploadId", "chunkNumber", ...) match the chunked-upload client.
type UploadForm struct {
	UploadID   string         `form:"uploadId"`
	UuID       string         `form:"uuid"`
	PartSize   int64          `form:"size"`
	Offset     int64          `form:"offset"`
	PartNumber int            `form:"chunkNumber"`
	PartFile   multipart.File `form:"file"`
}
// RenderAttachmentSettings is the exported route-level wrapper around
// renderAttachmentSettings.
func RenderAttachmentSettings(ctx *context.Context) {
	renderAttachmentSettings(ctx)
}
// renderAttachmentSettings copies attachment-related configuration into
// ctx.Data so templates can render upload limits and storage information.
func renderAttachmentSettings(ctx *context.Context) {
	ctx.Data["IsAttachmentEnabled"] = setting.Attachment.Enabled
	ctx.Data["AttachmentStoreType"] = setting.Attachment.StoreType
	ctx.Data["AttachmentAllowedTypes"] = setting.Attachment.AllowedTypes
	ctx.Data["AttachmentMaxSize"] = setting.Attachment.MaxSize
	ctx.Data["AttachmentMaxFiles"] = setting.Attachment.MaxFiles
}
  57. // UploadAttachment response for uploading issue's attachment
  58. func UploadAttachment(ctx *context.Context) {
  59. if !setting.Attachment.Enabled {
  60. ctx.Error(404, "attachment is not enabled")
  61. return
  62. }
  63. file, header, err := ctx.Req.FormFile("file")
  64. if err != nil {
  65. ctx.Error(500, fmt.Sprintf("FormFile: %v", err))
  66. return
  67. }
  68. defer file.Close()
  69. buf := make([]byte, 1024)
  70. n, _ := file.Read(buf)
  71. if n > 0 {
  72. buf = buf[:n]
  73. }
  74. err = upload.VerifyAllowedContentType(buf, strings.Split(setting.Attachment.AllowedTypes, ","))
  75. if err != nil {
  76. ctx.Error(400, err.Error())
  77. return
  78. }
  79. datasetID, _ := strconv.ParseInt(ctx.Req.FormValue("dataset_id"), 10, 64)
  80. attach, err := models.NewAttachment(&models.Attachment{
  81. IsPrivate: true,
  82. UploaderID: ctx.User.ID,
  83. Name: header.Filename,
  84. DatasetID: datasetID,
  85. }, buf, file)
  86. if err != nil {
  87. ctx.Error(500, fmt.Sprintf("NewAttachment: %v", err))
  88. return
  89. }
  90. log.Trace("New attachment uploaded: %s", attach.UUID)
  91. ctx.JSON(200, map[string]string{
  92. "uuid": attach.UUID,
  93. })
  94. }
  95. func UpdatePublicAttachment(ctx *context.Context) {
  96. file := ctx.Query("file")
  97. isPrivate, _ := strconv.ParseBool(ctx.Query("is_private"))
  98. attach, err := models.GetAttachmentByUUID(file)
  99. if err != nil {
  100. ctx.Error(404, err.Error())
  101. return
  102. }
  103. attach.IsPrivate = isPrivate
  104. models.UpdateAttachment(attach)
  105. }
// DeleteAttachment response for deleting issue's attachment.
// Besides removing the attachment row, it notifies the label system,
// removes any files produced by a previous decompression, and deletes the
// associated resumable-upload chunk record.
func DeleteAttachment(ctx *context.Context) {
	file := ctx.Query("file")
	attach, err := models.GetAttachmentByUUID(file)
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}
	//issue 214: mod del-dataset permission
	if !models.CanDelAttachment(ctx.IsSigned, ctx.User, attach) {
		ctx.Error(403)
		return
	}
	err = models.DeleteAttachment(attach, true)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("DeleteAttachment: %v", err))
		return
	}
	// Best-effort notification to the label system; the marshal error is
	// deliberately ignored.
	attachjson, _ := json.Marshal(attach)
	labelmsg.SendDeleteAttachToLabelSys(string(attachjson))
	// Clean up any directory tree produced when this archive was decompressed.
	DeleteAllUnzipFile(attach, "")
	_, err = models.DeleteFileChunkById(attach.UUID)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("DeleteFileChunkById: %v", err))
		return
	}
	ctx.JSON(200, map[string]string{
		"uuid": attach.UUID,
	})
}
  136. func DownloadUserIsOrgOrCollaboration(ctx *context.Context, attach *models.Attachment) bool {
  137. dataset, err := models.GetDatasetByID(attach.DatasetID)
  138. if err != nil {
  139. log.Info("query dataset error")
  140. } else {
  141. repo, err := models.GetRepositoryByID(dataset.RepoID)
  142. if err != nil {
  143. log.Info("query repo error.")
  144. } else {
  145. repo.GetOwner()
  146. if ctx.User != nil {
  147. if repo.Owner.IsOrganization() {
  148. if repo.Owner.IsUserPartOfOrg(ctx.User.ID) {
  149. log.Info("org user may visit the attach.")
  150. return true
  151. }
  152. }
  153. isCollaborator, _ := repo.IsCollaborator(ctx.User.ID)
  154. if isCollaborator {
  155. log.Info("Collaborator user may visit the attach.")
  156. return true
  157. }
  158. }
  159. }
  160. }
  161. return false
  162. }
// GetAttachment serve attachements.
// It resolves the attachment via the ":uuid" URL parameter, enforces access
// rules (uploader-only for private unlinked attachments with an
// org/collaborator fallback, repository unit permission otherwise, plus a
// dataset-level permission check), then either redirects to a presigned
// storage URL or streams the file contents directly.
func GetAttachment(ctx *context.Context) {
	typeCloudBrain := ctx.QueryInt("type")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	attach, err := models.GetAttachmentByUUID(ctx.Params(":uuid"))
	if err != nil {
		if models.IsErrAttachmentNotExist(err) {
			ctx.Error(404)
		} else {
			ctx.ServerError("GetAttachmentByUUID", err)
		}
		return
	}

	repository, unitType, err := attach.LinkedRepository()
	if err != nil {
		ctx.ServerError("LinkedRepository", err)
		return
	}

	dataSet, err := attach.LinkedDataSet()
	if err != nil {
		ctx.ServerError("LinkedDataSet", err)
		return
	}

	// An attachment linked only through a dataset inherits that dataset's
	// repository and is checked against the Datasets unit.
	if repository == nil && dataSet != nil {
		repository, _ = models.GetRepositoryByID(dataSet.RepoID)
		unitType = models.UnitTypeDatasets
	}

	if repository == nil { //If not linked
		//if !(ctx.IsSigned && attach.UploaderID == ctx.User.ID) && attach.IsPrivate { //We block if not the uploader
		//log.Info("ctx.IsSigned =" + fmt.Sprintf("%v", ctx.IsSigned))
		if !(ctx.IsSigned && attach.UploaderID == ctx.User.ID) && attach.IsPrivate && !DownloadUserIsOrgOrCollaboration(ctx, attach) { //We block if not the uploader
			ctx.Error(http.StatusNotFound)
			return
		}
	} else { //If we have the repository we check access
		perm, errPermission := models.GetUserRepoPermission(repository, ctx.User)
		if errPermission != nil {
			ctx.Error(http.StatusInternalServerError, "GetUserRepoPermission", errPermission.Error())
			return
		}
		if !perm.CanRead(unitType) {
			ctx.Error(http.StatusNotFound)
			return
		}
	}

	// Dataset-level permission is checked in addition to the repo check.
	if dataSet != nil {
		isPermit, err := models.GetUserDataSetPermission(dataSet, ctx.User)
		if err != nil {
			ctx.Error(http.StatusInternalServerError, "GetUserDataSetPermission", err.Error())
			return
		}
		if !isPermit {
			ctx.Error(http.StatusNotFound)
			return
		}
	}

	//If we have matched and access to release or issue
	if setting.Attachment.StoreType == storage.MinioStorageType {
		url := ""
		if typeCloudBrain == models.TypeCloudBrainOne {
			// Cloud brain one objects live in Minio; hand out a presigned GET.
			url, err = storage.Attachments.PresignedGetURL(setting.Attachment.Minio.BasePath+attach.RelativePath(), attach.Name)
			if err != nil {
				ctx.ServerError("PresignedGetURL", err)
				return
			}
		} else {
			// Cloud brain two objects live in OBS; go through the proxy
			// endpoint when PROXYURL is configured.
			if setting.PROXYURL != "" {
				url = setting.PROXYURL + "/obs_proxy_download?uuid=" + attach.UUID + "&file_name=" + attach.Name
				log.Info("return url=" + url)
			} else {
				url, err = storage.ObsGetPreSignedUrl(attach.UUID, attach.Name)
				if err != nil {
					ctx.ServerError("ObsGetPreSignedUrl", err)
					return
				}
			}
		}

		if err = increaseDownloadCount(attach, dataSet); err != nil {
			ctx.ServerError("Update", err)
			return
		}

		if dataSet != nil {
			// Dataset downloads are redirected straight to the signed URL.
			http.Redirect(ctx.Resp, ctx.Req.Request, url, http.StatusMovedPermanently)
		} else {
			// Non-dataset attachments are streamed through this server.
			fr, err := storage.Attachments.Open(attach.RelativePath())
			if err != nil {
				ctx.ServerError("Open", err)
				return
			}
			defer fr.Close()

			if err = ServeData(ctx, attach.Name, fr); err != nil {
				ctx.ServerError("ServeData", err)
				return
			}
		}
	} else {
		// Local (non-Minio) storage: always stream the file.
		fr, err := storage.Attachments.Open(attach.RelativePath())
		if err != nil {
			ctx.ServerError("Open", err)
			return
		}
		defer fr.Close()

		if err = increaseDownloadCount(attach, dataSet); err != nil {
			ctx.ServerError("Update", err)
			return
		}

		if err = ServeData(ctx, attach.Name, fr); err != nil {
			ctx.ServerError("ServeData", err)
			return
		}
	}
}
  279. func increaseDownloadCount(attach *models.Attachment, dataSet *models.Dataset) error {
  280. if err := attach.IncreaseDownloadCount(); err != nil {
  281. return err
  282. }
  283. if dataSet != nil {
  284. if err := models.IncreaseDownloadCount(dataSet.ID); err != nil {
  285. return err
  286. }
  287. }
  288. return nil
  289. }
  290. // Get a presigned url for put object
  291. func GetPresignedPutObjectURL(ctx *context.Context) {
  292. if !setting.Attachment.Enabled {
  293. ctx.Error(404, "attachment is not enabled")
  294. return
  295. }
  296. err := upload.VerifyFileType(ctx.Params("file_type"), strings.Split(setting.Attachment.AllowedTypes, ","))
  297. if err != nil {
  298. ctx.Error(400, err.Error())
  299. return
  300. }
  301. if setting.Attachment.StoreType == storage.MinioStorageType {
  302. uuid := gouuid.NewV4().String()
  303. url, err := storage.Attachments.PresignedPutURL(models.AttachmentRelativePath(uuid))
  304. if err != nil {
  305. ctx.ServerError("PresignedPutURL", err)
  306. return
  307. }
  308. ctx.JSON(200, map[string]string{
  309. "uuid": uuid,
  310. "url": url,
  311. })
  312. } else {
  313. ctx.Error(404, "storage type is not enabled")
  314. return
  315. }
  316. }
// AddAttachment response for add attachment record.
// The file itself must already exist in storage (Minio for cloud brain one,
// OBS for cloud brain two); this endpoint only verifies the object's
// presence, records the attachment row, and for recognized archive names
// kicks off the asynchronous decompress task.
func AddAttachment(ctx *context.Context) {
	typeCloudBrain := ctx.QueryInt("type")
	fileName := ctx.Query("file_name")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	uuid := ctx.Query("uuid")
	has := false
	if typeCloudBrain == models.TypeCloudBrainOne {
		has, err = storage.Attachments.HasObject(models.AttachmentRelativePath(uuid))
		if err != nil {
			ctx.ServerError("HasObject", err)
			return
		}
	} else {
		// OBS keys append the original file name below the relative path.
		has, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(uuid) + "/" + fileName)
		if err != nil {
			ctx.ServerError("ObsHasObject", err)
			return
		}
	}

	if !has {
		ctx.Error(404, "attachment has not been uploaded")
		return
	}

	attachment, err := models.InsertAttachment(&models.Attachment{
		UUID:       uuid,
		UploaderID: ctx.User.ID,
		IsPrivate:  true,
		Name:       fileName,
		Size:       ctx.QueryInt64("size"),
		DatasetID:  ctx.QueryInt64("dataset_id"),
		Type:       typeCloudBrain,
	})
	if err != nil {
		ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
		return
	}

	if attachment.DatasetID != 0 {
		if isCanDecompress(attachment.Name) {
			if typeCloudBrain == models.TypeCloudBrainOne {
				// Decompression is asynchronous; failures are only logged and
				// the request still succeeds.
				err = worker.SendDecompressTask(contexExt.Background(), uuid, attachment.Name)
				if err != nil {
					log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
				} else {
					attachment.DecompressState = models.DecompressStateIng
					err = models.UpdateAttachment(attachment)
					if err != nil {
						log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error())
					}
				}
			}
			//todo:decompress type_two
		}
	}

	ctx.JSON(200, map[string]string{
		"result_code": "0",
	})
}
  379. func isCanDecompress(name string) bool {
  380. if strings.HasSuffix(name, ".zip") || strings.HasSuffix(name, ".tar.gz") || strings.HasSuffix(name, ".tgz") {
  381. return true
  382. }
  383. return false
  384. }
  385. func UpdateAttachmentDecompressState(ctx *context.Context) {
  386. uuid := ctx.Query("uuid")
  387. result := ctx.Query("result")
  388. attach, err := models.GetAttachmentByUUID(uuid)
  389. if err != nil {
  390. log.Error("GetAttachmentByUUID(%s) failed:%s", uuid, err.Error())
  391. return
  392. }
  393. if result == DecompressSuccess {
  394. attach.DecompressState = models.DecompressStateDone
  395. } else if result == DecompressFailed {
  396. attach.DecompressState = models.DecompressStateFailed
  397. } else {
  398. log.Error("result is error:", result)
  399. return
  400. }
  401. err = models.UpdateAttachment(attach)
  402. if err != nil {
  403. log.Error("UpdateAttachment(%s) failed:%s", uuid, err.Error())
  404. return
  405. }
  406. log.Info("start to send msg to labelsystem ")
  407. dataset, _ := models.GetDatasetByID(attach.DatasetID)
  408. var labelMap map[string]string
  409. labelMap = make(map[string]string)
  410. labelMap["UUID"] = uuid
  411. labelMap["Type"] = fmt.Sprint(attach.Type)
  412. labelMap["UploaderID"] = fmt.Sprint(attach.UploaderID)
  413. labelMap["RepoID"] = fmt.Sprint(dataset.RepoID)
  414. labelMap["AttachName"] = attach.Name
  415. attachjson, _ := json.Marshal(labelMap)
  416. labelmsg.SendAddAttachToLabelSys(string(attachjson))
  417. log.Info("end to send msg to labelsystem ")
  418. ctx.JSON(200, map[string]string{
  419. "result_code": "0",
  420. })
  421. }
// GetSuccessChunks answers a resumable-upload client asking "what do you
// already have for this file?". Keyed by the file's MD5 and the current
// user, it reconciles the chunk record with the actual object in storage
// (fixing stale IsUploaded flags in either direction), returns the list of
// already-uploaded parts for an unfinished upload, and — when an attachment
// row already exists — the attachment and dataset identifiers.
func GetSuccessChunks(ctx *context.Context) {
	fileMD5 := ctx.Query("md5")
	typeCloudBrain := ctx.QueryInt("type")
	fileName := ctx.Query("file_name")
	var chunks string

	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	fileChunk, err := models.GetFileChunkByMD5AndUser(fileMD5, ctx.User.ID, typeCloudBrain)
	if err != nil {
		if models.IsErrFileChunkNotExist(err) {
			// Nothing known about this file yet: tell the client to start fresh.
			ctx.JSON(200, map[string]string{
				"uuid":     "",
				"uploaded": "0",
				"uploadID": "",
				"chunks":   "",
			})
		} else {
			ctx.ServerError("GetFileChunkByMD5", err)
		}
		return
	}

	// Check whether the completed object actually exists in storage.
	isExist := false
	if typeCloudBrain == models.TypeCloudBrainOne {
		isExist, err = storage.Attachments.HasObject(models.AttachmentRelativePath(fileChunk.UUID))
		if err != nil {
			ctx.ServerError("HasObject failed", err)
			return
		}
	} else {
		isExist, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(fileChunk.UUID) + "/" + fileName)
		if err != nil {
			ctx.ServerError("ObsHasObject failed", err)
			return
		}
	}

	if isExist {
		// Object present but record says "not uploaded": repair the record.
		if fileChunk.IsUploaded == models.FileNotUploaded {
			log.Info("the file has been uploaded but not recorded")
			fileChunk.IsUploaded = models.FileUploaded
			if err = models.UpdateFileChunk(fileChunk); err != nil {
				log.Error("UpdateFileChunk failed:", err.Error())
			}
		}
	} else {
		// Record says "uploaded" but the object is gone: repair the record.
		if fileChunk.IsUploaded == models.FileUploaded {
			log.Info("the file has been recorded but not uploaded")
			fileChunk.IsUploaded = models.FileNotUploaded
			if err = models.UpdateFileChunk(fileChunk); err != nil {
				log.Error("UpdateFileChunk failed:", err.Error())
			}
		}

		// Fetch the parts already received so the client can resume.
		if typeCloudBrain == models.TypeCloudBrainOne {
			chunks, err = storage.GetPartInfos(fileChunk.UUID, fileChunk.UploadID)
			if err != nil {
				log.Error("GetPartInfos failed:%v", err.Error())
			}
		} else {
			chunks, err = storage.GetObsPartInfos(fileChunk.UUID, fileChunk.UploadID)
			if err != nil {
				log.Error("GetObsPartInfos failed:%v", err.Error())
			}
		}
		if err != nil {
			// The multipart session is unusable: drop the record and make the
			// client restart from scratch.
			models.DeleteFileChunk(fileChunk)
			ctx.JSON(200, map[string]string{
				"uuid":     "",
				"uploaded": "0",
				"uploadID": "",
				"chunks":   "",
			})
			return
		}
	}

	var attachID int64
	attach, err := models.GetAttachmentByUUID(fileChunk.UUID)
	if err != nil {
		if models.IsErrAttachmentNotExist(err) {
			// No attachment row yet — that's fine, report it as 0 below.
			attachID = 0
		} else {
			ctx.ServerError("GetAttachmentByUUID", err)
			return
		}
	} else {
		attachID = attach.ID
	}

	if attach == nil {
		ctx.JSON(200, map[string]string{
			"uuid":        fileChunk.UUID,
			"uploaded":    strconv.Itoa(fileChunk.IsUploaded),
			"uploadID":    fileChunk.UploadID,
			"chunks":      string(chunks),
			"attachID":    "0",
			"datasetID":   "0",
			"fileName":    "",
			"datasetName": "",
		})
		return
	}

	dataset, err := models.GetDatasetByID(attach.DatasetID)
	if err != nil {
		ctx.ServerError("GetDatasetByID", err)
		return
	}

	ctx.JSON(200, map[string]string{
		"uuid":        fileChunk.UUID,
		"uploaded":    strconv.Itoa(fileChunk.IsUploaded),
		"uploadID":    fileChunk.UploadID,
		"chunks":      string(chunks),
		"attachID":    strconv.Itoa(int(attachID)),
		"datasetID":   strconv.Itoa(int(attach.DatasetID)),
		"fileName":    attach.Name,
		"datasetName": dataset.Title,
	})
}
// NewMultipart starts a resumable multipart upload session (Minio store type
// only). It validates the file type and the requested chunk count / total
// size, creates the multipart session in Minio or OBS depending on the
// cloud-brain type, records a FileChunk row for resume bookkeeping, and
// returns the generated UUID plus the storage upload ID.
func NewMultipart(ctx *context.Context) {
	if !setting.Attachment.Enabled {
		ctx.Error(404, "attachment is not enabled")
		return
	}

	err := upload.VerifyFileType(ctx.Query("fileType"), strings.Split(setting.Attachment.AllowedTypes, ","))
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}

	typeCloudBrain := ctx.QueryInt("type")
	err = checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	fileName := ctx.Query("file_name")

	if setting.Attachment.StoreType == storage.MinioStorageType {
		// Enforce the storage backend's hard limits before opening a session.
		totalChunkCounts := ctx.QueryInt("totalChunkCounts")
		if totalChunkCounts > minio_ext.MaxPartsCount {
			ctx.Error(400, fmt.Sprintf("chunk counts(%d) is too much", totalChunkCounts))
			return
		}

		fileSize := ctx.QueryInt64("size")
		if fileSize > minio_ext.MaxMultipartPutObjectSize {
			ctx.Error(400, fmt.Sprintf("file size(%d) is too big", fileSize))
			return
		}

		uuid := gouuid.NewV4().String()
		var uploadID string
		if typeCloudBrain == models.TypeCloudBrainOne {
			uploadID, err = storage.NewMultiPartUpload(uuid)
			if err != nil {
				ctx.ServerError("NewMultipart", err)
				return
			}
		} else {
			uploadID, err = storage.NewObsMultiPartUpload(uuid, fileName)
			if err != nil {
				ctx.ServerError("NewObsMultiPartUpload", err)
				return
			}
		}

		_, err = models.InsertFileChunk(&models.FileChunk{
			UUID:        uuid,
			UserID:      ctx.User.ID,
			UploadID:    uploadID,
			Md5:         ctx.Query("md5"),
			Size:        fileSize,
			TotalChunks: totalChunkCounts,
			Type:        typeCloudBrain,
		})
		if err != nil {
			ctx.Error(500, fmt.Sprintf("InsertFileChunk: %v", err))
			return
		}

		ctx.JSON(200, map[string]string{
			"uuid":     uuid,
			"uploadID": uploadID,
		})
	} else {
		ctx.Error(404, "storage type is not enabled")
		return
	}
}
  604. func PutOBSProxyUpload(ctx *context.Context) {
  605. uuid := ctx.Query("uuid")
  606. uploadID := ctx.Query("uploadId")
  607. partNumber := ctx.QueryInt("partNumber")
  608. fileName := ctx.Query("file_name")
  609. RequestBody := ctx.Req.Body()
  610. if RequestBody == nil {
  611. ctx.Error(500, fmt.Sprintf("FormFile: %v", RequestBody))
  612. return
  613. }
  614. err := storage.ObsMultiPartUpload(uuid, uploadID, partNumber, fileName, RequestBody.ReadCloser())
  615. if err != nil {
  616. log.Info("upload error.")
  617. }
  618. }
  619. func GetOBSProxyDownload(ctx *context.Context) {
  620. uuid := ctx.Query("uuid")
  621. fileName := ctx.Query("file_name")
  622. body, err := storage.ObsDownload(uuid, fileName)
  623. if err != nil {
  624. log.Info("upload error.")
  625. } else {
  626. defer body.Close()
  627. ctx.Resp.Header().Set("Content-Disposition", "attachment; filename="+fileName)
  628. ctx.Resp.Header().Set("Content-Type", "application/octet-stream")
  629. p := make([]byte, 1024)
  630. var readErr error
  631. var readCount int
  632. // 读取对象内容
  633. for {
  634. readCount, readErr = body.Read(p)
  635. if readCount > 0 {
  636. ctx.Resp.Write(p[:readCount])
  637. //fmt.Printf("%s", p[:readCount])
  638. }
  639. if readErr != nil {
  640. break
  641. }
  642. }
  643. }
  644. }
// GetMultipartUploadUrl returns the URL a client should PUT one chunk of a
// multipart upload to: a presigned Minio URL for cloud brain one, and for
// cloud brain two either the local OBS proxy endpoint (when PROXYURL is set)
// or a presigned OBS URL.
func GetMultipartUploadUrl(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	uploadID := ctx.Query("uploadID")
	partNumber := ctx.QueryInt("chunkNumber")
	size := ctx.QueryInt64("size")
	fileName := ctx.Query("file_name")

	typeCloudBrain := ctx.QueryInt("type")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	url := ""
	if typeCloudBrain == models.TypeCloudBrainOne {
		// NOTE(review): this rejects chunks LARGER than MinPartSize, which
		// reads inverted (an upper-bound check against a "Min" constant) —
		// confirm against minio_ext's definition before changing.
		if size > minio_ext.MinPartSize {
			ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size))
			return
		}

		url, err = storage.GenMultiPartSignedUrl(uuid, uploadID, partNumber, size)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err))
			return
		}
	} else {
		if setting.PROXYURL != "" {
			// Route the chunk through this server's OBS proxy endpoint.
			url = setting.PROXYURL + "/obs_proxy_multipart?uuid=" + uuid + "&uploadId=" + uploadID + "&partNumber=" + fmt.Sprint(partNumber) + "&file_name=" + fileName
			log.Info("return url=" + url)
		} else {
			url, err = storage.ObsGenMultiPartSignedUrl(uuid, uploadID, partNumber, fileName)
			if err != nil {
				ctx.Error(500, fmt.Sprintf("ObsGenMultiPartSignedUrl failed: %v", err))
				return
			}
			log.Info("url=" + url)
		}
	}

	ctx.JSON(200, map[string]string{
		"url": url,
	})
}
  685. func GetObsKey(ctx *context.Context) {
  686. uuid := gouuid.NewV4().String()
  687. key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/")
  688. ctx.JSON(200, map[string]string{
  689. "uuid": uuid,
  690. "key": key,
  691. "access_key_id": setting.AccessKeyID,
  692. "secret_access_key": setting.SecretAccessKey,
  693. "server": setting.Endpoint,
  694. "bucket": setting.Bucket,
  695. })
  696. }
// CompleteMultipart finalizes a resumable multipart upload: it completes the
// storage-side multipart session (Minio or OBS), marks the chunk record
// uploaded, inserts the attachment row, notifies watchers, and either queues
// an asynchronous decompress task (archives) or pushes the attachment's
// metadata to the label system (plain files).
func CompleteMultipart(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	uploadID := ctx.Query("uploadID")
	typeCloudBrain := ctx.QueryInt("type")
	fileName := ctx.Query("file_name")

	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	fileChunk, err := models.GetFileChunkByUUID(uuid)
	if err != nil {
		if models.IsErrFileChunkNotExist(err) {
			ctx.Error(404)
		} else {
			ctx.ServerError("GetFileChunkByUUID", err)
		}
		return
	}

	if typeCloudBrain == models.TypeCloudBrainOne {
		_, err = storage.CompleteMultiPartUpload(uuid, uploadID)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err))
			return
		}
	} else {
		err = storage.CompleteObsMultiPartUpload(uuid, uploadID, fileName)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("CompleteObsMultiPartUpload failed: %v", err))
			return
		}
	}

	fileChunk.IsUploaded = models.FileUploaded

	err = models.UpdateFileChunk(fileChunk)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
		return
	}

	attachment, err := models.InsertAttachment(&models.Attachment{
		UUID:       uuid,
		UploaderID: ctx.User.ID,
		IsPrivate:  true,
		Name:       fileName,
		Size:       ctx.QueryInt64("size"),
		DatasetID:  ctx.QueryInt64("dataset_id"),
		Type:       typeCloudBrain,
	})
	if err != nil {
		ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
		return
	}

	// NOTE(review): both lookup errors are discarded here; if the dataset
	// row is missing, dataset.RepoID below will panic on a nil pointer —
	// confirm whether dataset_id=0 can reach this path before fixing.
	dataset, _ := models.GetDatasetByID(attachment.DatasetID)
	repository, _ := models.GetRepositoryByID(dataset.RepoID)
	notification.NotifyUploadAttachment(ctx.User, repository, attachment)

	if attachment.DatasetID != 0 {
		if isCanDecompress(attachment.Name) {
			if typeCloudBrain == models.TypeCloudBrainOne {
				// Decompression is asynchronous; failures are logged only and
				// the request still succeeds.
				err = worker.SendDecompressTask(contexExt.Background(), uuid, attachment.Name)
				if err != nil {
					log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
				} else {
					attachment.DecompressState = models.DecompressStateIng
					err = models.UpdateAttachment(attachment)
					if err != nil {
						log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error())
					}
				}
			}
			if typeCloudBrain == models.TypeCloudBrainTwo {
				// Cloud brain two archives are decompressed via the label OBS queue.
				attachjson, _ := json.Marshal(attachment)
				labelmsg.SendDecompressAttachToLabelOBS(string(attachjson))
			}
		} else {
			// Non-archive files: push metadata straight to the label system.
			var labelMap map[string]string
			labelMap = make(map[string]string)
			labelMap["UUID"] = uuid
			labelMap["Type"] = fmt.Sprint(attachment.Type)
			labelMap["UploaderID"] = fmt.Sprint(attachment.UploaderID)
			labelMap["RepoID"] = fmt.Sprint(dataset.RepoID)
			labelMap["AttachName"] = attachment.Name
			attachjson, _ := json.Marshal(labelMap)
			labelmsg.SendAddAttachToLabelSys(string(attachjson))
		}
	}

	ctx.JSON(200, map[string]string{
		"result_code": "0",
	})
}
  785. func UpdateMultipart(ctx *context.Context) {
  786. uuid := ctx.Query("uuid")
  787. partNumber := ctx.QueryInt("chunkNumber")
  788. etag := ctx.Query("etag")
  789. fileChunk, err := models.GetFileChunkByUUID(uuid)
  790. if err != nil {
  791. if models.IsErrFileChunkNotExist(err) {
  792. ctx.Error(404)
  793. } else {
  794. ctx.ServerError("GetFileChunkByUUID", err)
  795. }
  796. return
  797. }
  798. fileChunk.CompletedParts = append(fileChunk.CompletedParts, strconv.Itoa(partNumber)+"-"+strings.Replace(etag, "\"", "", -1))
  799. err = models.UpdateFileChunk(fileChunk)
  800. if err != nil {
  801. ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
  802. return
  803. }
  804. ctx.JSON(200, map[string]string{
  805. "result_code": "0",
  806. })
  807. }
  808. func HandleUnDecompressAttachment() {
  809. attachs, err := models.GetUnDecompressAttachments()
  810. if err != nil {
  811. log.Error("GetUnDecompressAttachments failed:", err.Error())
  812. return
  813. }
  814. for _, attach := range attachs {
  815. if attach.Type == models.TypeCloudBrainOne {
  816. err = worker.SendDecompressTask(contexExt.Background(), attach.UUID, attach.Name)
  817. if err != nil {
  818. log.Error("SendDecompressTask(%s) failed:%s", attach.UUID, err.Error())
  819. } else {
  820. attach.DecompressState = models.DecompressStateIng
  821. err = models.UpdateAttachment(attach)
  822. if err != nil {
  823. log.Error("UpdateAttachment state(%s) failed:%s", attach.UUID, err.Error())
  824. }
  825. }
  826. } else if attach.Type == models.TypeCloudBrainTwo {
  827. attachjson, _ := json.Marshal(attach)
  828. labelmsg.SendDecompressAttachToLabelOBS(string(attachjson))
  829. }
  830. }
  831. return
  832. }
  833. func QueryAllPublicDataset(ctx *context.Context) {
  834. attachs, err := models.GetAllPublicAttachments()
  835. if err != nil {
  836. ctx.JSON(200, map[string]string{
  837. "result_code": "-1",
  838. "error_msg": err.Error(),
  839. "data": "",
  840. })
  841. return
  842. }
  843. queryDatasets(ctx, attachs)
  844. }
  845. func QueryPrivateDataset(ctx *context.Context) {
  846. username := ctx.Params(":username")
  847. attachs, err := models.GetPrivateAttachments(username)
  848. if err != nil {
  849. ctx.JSON(200, map[string]string{
  850. "result_code": "-1",
  851. "error_msg": err.Error(),
  852. "data": "",
  853. })
  854. return
  855. }
  856. for _, attach := range attachs {
  857. attach.Name = username
  858. }
  859. queryDatasets(ctx, attachs)
  860. }
  861. func queryDatasets(ctx *context.Context, attachs []*models.AttachmentUsername) {
  862. var datasets []CloudBrainDataset
  863. if len(attachs) == 0 {
  864. log.Info("dataset is null")
  865. ctx.JSON(200, map[string]string{
  866. "result_code": "0",
  867. "error_msg": "",
  868. "data": "",
  869. })
  870. return
  871. }
  872. for _, attch := range attachs {
  873. has, err := storage.Attachments.HasObject(models.AttachmentRelativePath(attch.UUID))
  874. if err != nil || !has {
  875. continue
  876. }
  877. datasets = append(datasets, CloudBrainDataset{strconv.FormatInt(attch.ID, 10),
  878. attch.Attachment.Name,
  879. setting.Attachment.Minio.RealPath +
  880. setting.Attachment.Minio.Bucket + "/" +
  881. setting.Attachment.Minio.BasePath +
  882. models.AttachmentRelativePath(attch.UUID) +
  883. attch.UUID,
  884. attch.Name,
  885. attch.CreatedUnix.Format("2006-01-02 03:04:05 PM")})
  886. }
  887. data, err := json.Marshal(datasets)
  888. if err != nil {
  889. log.Error("json.Marshal failed:", err.Error())
  890. ctx.JSON(200, map[string]string{
  891. "result_code": "-1",
  892. "error_msg": err.Error(),
  893. "data": "",
  894. })
  895. return
  896. }
  897. ctx.JSON(200, map[string]string{
  898. "result_code": "0",
  899. "error_msg": "",
  900. "data": string(data),
  901. })
  902. return
  903. }
  904. func checkTypeCloudBrain(typeCloudBrain int) error {
  905. if typeCloudBrain != models.TypeCloudBrainOne && typeCloudBrain != models.TypeCloudBrainTwo {
  906. log.Error("type error:", typeCloudBrain)
  907. return errors.New("type error")
  908. }
  909. return nil
  910. }