You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

attachment.go 17 kB

5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706
  1. // Copyright 2017 The Gitea Authors. All rights reserved.
  2. // Use of this source code is governed by a MIT-style
  3. // license that can be found in the LICENSE file.
  4. package repo
import (
	contexExt "context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"strconv"
	"strings"

	"code.gitea.io/gitea/models"
	"code.gitea.io/gitea/modules/context"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/minio_ext"
	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/storage"
	"code.gitea.io/gitea/modules/upload"
	"code.gitea.io/gitea/modules/worker"

	gouuid "github.com/satori/go.uuid"
)
// Decompress task result codes reported back by the decompress worker
// (consumed by UpdateAttachmentDecompressState).
const (
	//result of decompress
	DecompressSuccess = "0"
	DecompressFailed  = "1"
)
// CloudBrainDataset is the JSON shape returned to the CloudBrain service when
// listing datasets (see queryDatasets). Note that the JSON names differ from
// the field names: UUID -> "id", Path -> "place", UserName -> "provider".
type CloudBrainDataset struct {
	UUID       string `json:"id"`         // attachment row ID as a decimal string (see queryDatasets), not the storage UUID
	Name       string `json:"name"`       // attachment (file) name
	Path       string `json:"place"`      // full minio path of the stored object
	UserName   string `json:"provider"`   // user name shown as the dataset provider
	CreateTime string `json:"created_at"` // creation time formatted as "2006-01-02 03:04:05 PM"
}
// RenderAttachmentSettings is the exported wrapper around
// renderAttachmentSettings; it exposes attachment configuration to templates.
func RenderAttachmentSettings(ctx *context.Context) {
	renderAttachmentSettings(ctx)
}
// renderAttachmentSettings copies the attachment-related settings into
// ctx.Data so templates can render upload limits and allowed types.
func renderAttachmentSettings(ctx *context.Context) {
	ctx.Data["IsAttachmentEnabled"] = setting.Attachment.Enabled
	ctx.Data["AttachmentStoreType"] = setting.Attachment.StoreType
	ctx.Data["AttachmentAllowedTypes"] = setting.Attachment.AllowedTypes
	ctx.Data["AttachmentMaxSize"] = setting.Attachment.MaxSize
	ctx.Data["AttachmentMaxFiles"] = setting.Attachment.MaxFiles
}
  44. // UploadAttachment response for uploading issue's attachment
  45. func UploadAttachment(ctx *context.Context) {
  46. if !setting.Attachment.Enabled {
  47. ctx.Error(404, "attachment is not enabled")
  48. return
  49. }
  50. file, header, err := ctx.Req.FormFile("file")
  51. if err != nil {
  52. ctx.Error(500, fmt.Sprintf("FormFile: %v", err))
  53. return
  54. }
  55. defer file.Close()
  56. buf := make([]byte, 1024)
  57. n, _ := file.Read(buf)
  58. if n > 0 {
  59. buf = buf[:n]
  60. }
  61. err = upload.VerifyAllowedContentType(buf, strings.Split(setting.Attachment.AllowedTypes, ","))
  62. if err != nil {
  63. ctx.Error(400, err.Error())
  64. return
  65. }
  66. datasetID, _ := strconv.ParseInt(ctx.Req.FormValue("dataset_id"), 10, 64)
  67. attach, err := models.NewAttachment(&models.Attachment{
  68. IsPrivate: true,
  69. UploaderID: ctx.User.ID,
  70. Name: header.Filename,
  71. DatasetID: datasetID,
  72. }, buf, file)
  73. if err != nil {
  74. ctx.Error(500, fmt.Sprintf("NewAttachment: %v", err))
  75. return
  76. }
  77. log.Trace("New attachment uploaded: %s", attach.UUID)
  78. ctx.JSON(200, map[string]string{
  79. "uuid": attach.UUID,
  80. })
  81. }
  82. func UpdatePublicAttachment(ctx *context.Context) {
  83. file := ctx.Query("file")
  84. isPrivate, _ := strconv.ParseBool(ctx.Query("is_private"))
  85. attach, err := models.GetAttachmentByUUID(file)
  86. if err != nil {
  87. ctx.Error(404, err.Error())
  88. return
  89. }
  90. attach.IsPrivate = isPrivate
  91. models.UpdateAttachment(attach)
  92. }
  93. // DeleteAttachment response for deleting issue's attachment
  94. func DeleteAttachment(ctx *context.Context) {
  95. file := ctx.Query("file")
  96. attach, err := models.GetAttachmentByUUID(file)
  97. if err != nil {
  98. ctx.Error(400, err.Error())
  99. return
  100. }
  101. if !ctx.IsSigned || (ctx.User.ID != attach.UploaderID) {
  102. ctx.Error(403)
  103. return
  104. }
  105. err = models.DeleteAttachment(attach, false)
  106. if err != nil {
  107. ctx.Error(500, fmt.Sprintf("DeleteAttachment: %v", err))
  108. return
  109. }
  110. ctx.JSON(200, map[string]string{
  111. "uuid": attach.UUID,
  112. })
  113. }
  114. // GetAttachment serve attachements
  115. func GetAttachment(ctx *context.Context) {
  116. attach, err := models.GetAttachmentByUUID(ctx.Params(":uuid"))
  117. if err != nil {
  118. if models.IsErrAttachmentNotExist(err) {
  119. ctx.Error(404)
  120. } else {
  121. ctx.ServerError("GetAttachmentByUUID", err)
  122. }
  123. return
  124. }
  125. repository, unitType, err := attach.LinkedRepository()
  126. if err != nil {
  127. ctx.ServerError("LinkedRepository", err)
  128. return
  129. }
  130. if repository == nil { //If not linked
  131. if !(ctx.IsSigned && attach.UploaderID == ctx.User.ID) && attach.IsPrivate { //We block if not the uploader
  132. ctx.Error(http.StatusNotFound)
  133. return
  134. }
  135. } else { //If we have the repository we check access
  136. perm, err := models.GetUserRepoPermission(repository, ctx.User)
  137. if err != nil {
  138. ctx.Error(http.StatusInternalServerError, "GetUserRepoPermission", err.Error())
  139. return
  140. }
  141. if !perm.CanRead(unitType) {
  142. ctx.Error(http.StatusNotFound)
  143. return
  144. }
  145. }
  146. dataSet, err := attach.LinkedDataSet()
  147. if err != nil {
  148. ctx.ServerError("LinkedDataSet", err)
  149. return
  150. }
  151. if dataSet != nil {
  152. isPermit, err := models.GetUserDataSetPermission(dataSet, ctx.User)
  153. if err != nil {
  154. ctx.Error(http.StatusInternalServerError, "GetUserDataSetPermission", err.Error())
  155. return
  156. }
  157. if !isPermit {
  158. ctx.Error(http.StatusNotFound)
  159. return
  160. }
  161. }
  162. //If we have matched and access to release or issue
  163. if setting.Attachment.StoreType == storage.MinioStorageType {
  164. url, err := storage.Attachments.PresignedGetURL(attach.RelativePath(), attach.Name)
  165. if err != nil {
  166. ctx.ServerError("PresignedGetURL", err)
  167. return
  168. }
  169. if err = increaseDownloadCount(attach, dataSet); err != nil {
  170. ctx.ServerError("Update", err)
  171. return
  172. }
  173. http.Redirect(ctx.Resp, ctx.Req.Request, url, http.StatusMovedPermanently)
  174. } else {
  175. fr, err := storage.Attachments.Open(attach.RelativePath())
  176. if err != nil {
  177. ctx.ServerError("Open", err)
  178. return
  179. }
  180. defer fr.Close()
  181. if err = increaseDownloadCount(attach, dataSet); err != nil {
  182. ctx.ServerError("Update", err)
  183. return
  184. }
  185. if err = ServeData(ctx, attach.Name, fr); err != nil {
  186. ctx.ServerError("ServeData", err)
  187. return
  188. }
  189. }
  190. }
  191. func increaseDownloadCount(attach *models.Attachment, dataSet *models.Dataset) error {
  192. if err := attach.IncreaseDownloadCount(); err != nil {
  193. return err
  194. }
  195. if dataSet != nil {
  196. if err := models.IncreaseDownloadCount(dataSet.ID); err != nil {
  197. return err
  198. }
  199. }
  200. return nil
  201. }
  202. // Get a presigned url for put object
  203. func GetPresignedPutObjectURL(ctx *context.Context) {
  204. if !setting.Attachment.Enabled {
  205. ctx.Error(404, "attachment is not enabled")
  206. return
  207. }
  208. err := upload.VerifyFileType(ctx.Params("file_type"), strings.Split(setting.Attachment.AllowedTypes, ","))
  209. if err != nil {
  210. ctx.Error(400, err.Error())
  211. return
  212. }
  213. if setting.Attachment.StoreType == storage.MinioStorageType {
  214. uuid := gouuid.NewV4().String()
  215. url, err := storage.Attachments.PresignedPutURL(models.AttachmentRelativePath(uuid))
  216. if err != nil {
  217. ctx.ServerError("PresignedPutURL", err)
  218. return
  219. }
  220. ctx.JSON(200, map[string]string{
  221. "uuid": uuid,
  222. "url": url,
  223. })
  224. } else {
  225. ctx.Error(404, "storage type is not enabled")
  226. return
  227. }
  228. }
  229. // AddAttachment response for add attachment record
  230. func AddAttachment(ctx *context.Context) {
  231. uuid := ctx.Query("uuid")
  232. has, err := storage.Attachments.HasObject(models.AttachmentRelativePath(uuid))
  233. if err != nil {
  234. ctx.ServerError("HasObject", err)
  235. return
  236. }
  237. if !has {
  238. ctx.Error(404, "attachment has not been uploaded")
  239. return
  240. }
  241. attachment, err := models.InsertAttachment(&models.Attachment{
  242. UUID: uuid,
  243. UploaderID: ctx.User.ID,
  244. IsPrivate: true,
  245. Name: ctx.Query("file_name"),
  246. Size: ctx.QueryInt64("size"),
  247. DatasetID: ctx.QueryInt64("dataset_id"),
  248. })
  249. if err != nil {
  250. ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
  251. return
  252. }
  253. if attachment.DatasetID != 0 {
  254. if strings.HasSuffix(attachment.Name, ".zip") {
  255. err = worker.SendDecompressTask(contexExt.Background(), uuid)
  256. if err != nil {
  257. log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
  258. } else {
  259. attachment.DecompressState = models.DecompressStateIng
  260. err = models.UpdateAttachment(attachment)
  261. if err != nil {
  262. log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error())
  263. }
  264. }
  265. }
  266. }
  267. ctx.JSON(200, map[string]string{
  268. "result_code": "0",
  269. })
  270. }
  271. func UpdateAttachmentDecompressState(ctx *context.Context) {
  272. uuid := ctx.Query("uuid")
  273. result := ctx.Query("result")
  274. attach, err := models.GetAttachmentByUUID(uuid)
  275. if err != nil {
  276. log.Error("GetAttachmentByUUID(%s) failed:%s", uuid, err.Error())
  277. return
  278. }
  279. if result == DecompressSuccess {
  280. attach.DecompressState = models.DecompressStateDone
  281. } else if result == DecompressFailed {
  282. attach.DecompressState = models.DecompressStateFailed
  283. } else {
  284. log.Error("result is error:", result)
  285. return
  286. }
  287. err = models.UpdateAttachment(attach)
  288. if err != nil {
  289. log.Error("UpdateAttachment(%s) failed:%s", uuid, err.Error())
  290. return
  291. }
  292. ctx.JSON(200, map[string]string{
  293. "result_code": "0",
  294. })
  295. }
  296. func GetSuccessChunks(ctx *context.Context) {
  297. fileMD5 := ctx.Query("md5")
  298. var chunks string
  299. fileChunk, err := models.GetFileChunkByMD5AndUser(fileMD5, ctx.User.ID)
  300. if err != nil {
  301. if models.IsErrFileChunkNotExist(err) {
  302. ctx.JSON(200, map[string]string{
  303. "uuid": "",
  304. "uploaded": "0",
  305. "uploadID": "",
  306. "chunks": "",
  307. })
  308. } else {
  309. ctx.ServerError("GetFileChunkByMD5", err)
  310. }
  311. return
  312. }
  313. isExist, err := storage.Attachments.HasObject(models.AttachmentRelativePath(fileChunk.UUID))
  314. if err != nil {
  315. ctx.ServerError("HasObject failed", err)
  316. return
  317. }
  318. if isExist {
  319. if fileChunk.IsUploaded == models.FileNotUploaded {
  320. log.Info("the file has been uploaded but not recorded")
  321. fileChunk.IsUploaded = models.FileUploaded
  322. if err = models.UpdateFileChunk(fileChunk); err != nil {
  323. log.Error("UpdateFileChunk failed:", err.Error())
  324. }
  325. }
  326. } else {
  327. if fileChunk.IsUploaded == models.FileUploaded {
  328. log.Info("the file has been recorded but not uploaded")
  329. fileChunk.IsUploaded = models.FileNotUploaded
  330. if err = models.UpdateFileChunk(fileChunk); err != nil {
  331. log.Error("UpdateFileChunk failed:", err.Error())
  332. }
  333. }
  334. chunks, err = storage.GetPartInfos(fileChunk.UUID, fileChunk.UploadID)
  335. if err != nil {
  336. ctx.ServerError("GetPartInfos failed", err)
  337. return
  338. }
  339. }
  340. var attachID int64
  341. attach, err := models.GetAttachmentByUUID(fileChunk.UUID)
  342. if err != nil {
  343. if models.IsErrAttachmentNotExist(err) {
  344. attachID = 0
  345. } else {
  346. ctx.ServerError("GetAttachmentByUUID", err)
  347. return
  348. }
  349. } else {
  350. attachID = attach.ID
  351. }
  352. if attach == nil {
  353. ctx.JSON(200, map[string]string{
  354. "uuid": fileChunk.UUID,
  355. "uploaded": strconv.Itoa(fileChunk.IsUploaded),
  356. "uploadID": fileChunk.UploadID,
  357. "chunks": string(chunks),
  358. "attachID": "0",
  359. "datasetID": "0",
  360. "fileName": "",
  361. "datasetName": "",
  362. })
  363. return
  364. }
  365. dataset, err := models.GetDatasetByID(attach.DatasetID)
  366. if err != nil {
  367. ctx.ServerError("GetDatasetByID", err)
  368. return
  369. }
  370. ctx.JSON(200, map[string]string{
  371. "uuid": fileChunk.UUID,
  372. "uploaded": strconv.Itoa(fileChunk.IsUploaded),
  373. "uploadID": fileChunk.UploadID,
  374. "chunks": string(chunks),
  375. "attachID": strconv.Itoa(int(attachID)),
  376. "datasetID": strconv.Itoa(int(attach.DatasetID)),
  377. "fileName": attach.Name,
  378. "datasetName": dataset.Title,
  379. })
  380. }
  381. func NewMultipart(ctx *context.Context) {
  382. if !setting.Attachment.Enabled {
  383. ctx.Error(404, "attachment is not enabled")
  384. return
  385. }
  386. err := upload.VerifyFileType(ctx.Query("fileType"), strings.Split(setting.Attachment.AllowedTypes, ","))
  387. if err != nil {
  388. ctx.Error(400, err.Error())
  389. return
  390. }
  391. if setting.Attachment.StoreType == storage.MinioStorageType {
  392. totalChunkCounts := ctx.QueryInt("totalChunkCounts")
  393. if totalChunkCounts > minio_ext.MaxPartsCount {
  394. ctx.Error(400, fmt.Sprintf("chunk counts(%d) is too much", totalChunkCounts))
  395. return
  396. }
  397. fileSize := ctx.QueryInt64("size")
  398. if fileSize > minio_ext.MaxMultipartPutObjectSize {
  399. ctx.Error(400, fmt.Sprintf("file size(%d) is too big", fileSize))
  400. return
  401. }
  402. uuid := gouuid.NewV4().String()
  403. uploadID, err := storage.NewMultiPartUpload(uuid)
  404. if err != nil {
  405. ctx.ServerError("NewMultipart", err)
  406. return
  407. }
  408. _, err = models.InsertFileChunk(&models.FileChunk{
  409. UUID: uuid,
  410. UserID: ctx.User.ID,
  411. UploadID: uploadID,
  412. Md5: ctx.Query("md5"),
  413. Size: fileSize,
  414. TotalChunks: totalChunkCounts,
  415. })
  416. if err != nil {
  417. ctx.Error(500, fmt.Sprintf("InsertFileChunk: %v", err))
  418. return
  419. }
  420. ctx.JSON(200, map[string]string{
  421. "uuid": uuid,
  422. "uploadID": uploadID,
  423. })
  424. } else {
  425. ctx.Error(404, "storage type is not enabled")
  426. return
  427. }
  428. }
// GetMultipartUploadUrl returns a presigned URL for uploading one part
// (chunk) of an in-progress multipart upload.
func GetMultipartUploadUrl(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	uploadID := ctx.Query("uploadID")
	partNumber := ctx.QueryInt("chunkNumber")
	size := ctx.QueryInt64("size")
	// NOTE(review): minio_ext.MinPartSize is used here as an UPPER bound on
	// the chunk size even though the name suggests a lower bound — the
	// client presumably uploads fixed-size chunks of at most this size.
	// Confirm against the uploader and minio_ext before changing.
	if size > minio_ext.MinPartSize {
		ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size))
		return
	}
	url, err := storage.GenMultiPartSignedUrl(uuid, uploadID, partNumber, size)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err))
		return
	}
	ctx.JSON(200, map[string]string{
		"url": url,
	})
}
  447. func CompleteMultipart(ctx *context.Context) {
  448. uuid := ctx.Query("uuid")
  449. uploadID := ctx.Query("uploadID")
  450. fileChunk, err := models.GetFileChunkByUUID(uuid)
  451. if err != nil {
  452. if models.IsErrFileChunkNotExist(err) {
  453. ctx.Error(404)
  454. } else {
  455. ctx.ServerError("GetFileChunkByUUID", err)
  456. }
  457. return
  458. }
  459. _, err = storage.CompleteMultiPartUpload(uuid, uploadID)
  460. if err != nil {
  461. ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err))
  462. return
  463. }
  464. fileChunk.IsUploaded = models.FileUploaded
  465. err = models.UpdateFileChunk(fileChunk)
  466. if err != nil {
  467. ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
  468. return
  469. }
  470. attachment, err := models.InsertAttachment(&models.Attachment{
  471. UUID: uuid,
  472. UploaderID: ctx.User.ID,
  473. IsPrivate: true,
  474. Name: ctx.Query("file_name"),
  475. Size: ctx.QueryInt64("size"),
  476. DatasetID: ctx.QueryInt64("dataset_id"),
  477. })
  478. if err != nil {
  479. ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
  480. return
  481. }
  482. if attachment.DatasetID != 0 {
  483. if strings.HasSuffix(attachment.Name, ".zip") {
  484. err = worker.SendDecompressTask(contexExt.Background(), uuid)
  485. if err != nil {
  486. log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
  487. } else {
  488. attachment.DecompressState = models.DecompressStateIng
  489. err = models.UpdateAttachment(attachment)
  490. if err != nil {
  491. log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error())
  492. }
  493. }
  494. }
  495. }
  496. ctx.JSON(200, map[string]string{
  497. "result_code": "0",
  498. })
  499. }
  500. func UpdateMultipart(ctx *context.Context) {
  501. uuid := ctx.Query("uuid")
  502. partNumber := ctx.QueryInt("chunkNumber")
  503. etag := ctx.Query("etag")
  504. fileChunk, err := models.GetFileChunkByUUID(uuid)
  505. if err != nil {
  506. if models.IsErrFileChunkNotExist(err) {
  507. ctx.Error(404)
  508. } else {
  509. ctx.ServerError("GetFileChunkByUUID", err)
  510. }
  511. return
  512. }
  513. fileChunk.CompletedParts = append(fileChunk.CompletedParts, strconv.Itoa(partNumber)+"-"+strings.Replace(etag, "\"", "", -1))
  514. err = models.UpdateFileChunk(fileChunk)
  515. if err != nil {
  516. ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
  517. return
  518. }
  519. ctx.JSON(200, map[string]string{
  520. "result_code": "0",
  521. })
  522. }
  523. func HandleUnDecompressAttachment() {
  524. attachs, err := models.GetUnDecompressAttachments()
  525. if err != nil {
  526. log.Error("GetUnDecompressAttachments failed:", err.Error())
  527. return
  528. }
  529. for _, attach := range attachs {
  530. err = worker.SendDecompressTask(contexExt.Background(), attach.UUID)
  531. if err != nil {
  532. log.Error("SendDecompressTask(%s) failed:%s", attach.UUID, err.Error())
  533. } else {
  534. attach.DecompressState = models.DecompressStateIng
  535. err = models.UpdateAttachment(attach)
  536. if err != nil {
  537. log.Error("UpdateAttachment state(%s) failed:%s", attach.UUID, err.Error())
  538. }
  539. }
  540. }
  541. return
  542. }
  543. func QueryAllPublicDataset(ctx *context.Context){
  544. attachs, err := models.GetAllPublicAttachments()
  545. if err != nil {
  546. ctx.JSON(200, map[string]string{
  547. "result_code": "-1",
  548. "error_msg": err.Error(),
  549. "data": "",
  550. })
  551. return
  552. }
  553. queryDatasets(ctx, attachs)
  554. }
  555. func QueryPrivateDataset(ctx *context.Context){
  556. username := ctx.Params(":username")
  557. attachs, err := models.GetPrivateAttachments(username)
  558. if err != nil {
  559. ctx.JSON(200, map[string]string{
  560. "result_code": "-1",
  561. "error_msg": err.Error(),
  562. "data": "",
  563. })
  564. return
  565. }
  566. for _, attach := range attachs {
  567. attach.Name = username
  568. }
  569. queryDatasets(ctx, attachs)
  570. }
  571. func queryDatasets(ctx *context.Context, attachs []*models.AttachmentUsername) {
  572. var datasets []CloudBrainDataset
  573. if len(attachs) == 0 {
  574. log.Info("dataset is null")
  575. ctx.JSON(200, map[string]string{
  576. "result_code": "0",
  577. "error_msg": "",
  578. "data": "",
  579. })
  580. return
  581. }
  582. for _, attch := range attachs {
  583. has,err := storage.Attachments.HasObject(models.AttachmentRelativePath(attch.UUID))
  584. if err != nil || !has {
  585. continue
  586. }
  587. datasets = append(datasets, CloudBrainDataset{strconv.FormatInt(attch.ID, 10),
  588. attch.Attachment.Name,
  589. setting.Attachment.Minio.RealPath +
  590. setting.Attachment.Minio.Bucket + "/" +
  591. setting.Attachment.Minio.BasePath +
  592. models.AttachmentRelativePath(attch.UUID) +
  593. attch.UUID,
  594. attch.Name,
  595. attch.CreatedUnix.Format("2006-01-02 03:04:05 PM")})
  596. }
  597. data,err := json.Marshal(datasets)
  598. if err != nil {
  599. log.Error("json.Marshal failed:", err.Error())
  600. ctx.JSON(200, map[string]string{
  601. "result_code": "-1",
  602. "error_msg": err.Error(),
  603. "data": "",
  604. })
  605. return
  606. }
  607. ctx.JSON(200, map[string]string{
  608. "result_code": "0",
  609. "error_msg": "",
  610. "data": string(data),
  611. })
  612. return
  613. }