
attachment.go 27 kB

// Copyright 2017 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package repo

import (
	contexExt "context"
	"encoding/json"
	"errors"
	"fmt"
	"mime/multipart"
	"net/http"
	"path"
	"strconv"
	"strings"

	"code.gitea.io/gitea/models"
	"code.gitea.io/gitea/modules/context"
	"code.gitea.io/gitea/modules/labelmsg"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/minio_ext"
	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/storage"
	"code.gitea.io/gitea/modules/upload"
	"code.gitea.io/gitea/modules/worker"

	gouuid "github.com/satori/go.uuid"
)

const (
	// result of decompress
	DecompressSuccess = "0"
	DecompressFailed  = "1"
)

type CloudBrainDataset struct {
	UUID       string `json:"id"`
	Name       string `json:"name"`
	Path       string `json:"place"`
	UserName   string `json:"provider"`
	CreateTime string `json:"created_at"`
}

type UploadForm struct {
	UploadID   string         `form:"uploadId"`
	UuID       string         `form:"uuid"`
	PartSize   int64          `form:"size"`
	Offset     int64          `form:"offset"`
	PartNumber int            `form:"chunkNumber"`
	PartFile   multipart.File `form:"file"`
}

func RenderAttachmentSettings(ctx *context.Context) {
	renderAttachmentSettings(ctx)
}

func renderAttachmentSettings(ctx *context.Context) {
	ctx.Data["IsAttachmentEnabled"] = setting.Attachment.Enabled
	ctx.Data["AttachmentStoreType"] = setting.Attachment.StoreType
	ctx.Data["AttachmentAllowedTypes"] = setting.Attachment.AllowedTypes
	ctx.Data["AttachmentMaxSize"] = setting.Attachment.MaxSize
	ctx.Data["AttachmentMaxFiles"] = setting.Attachment.MaxFiles
}

// UploadAttachment handles uploading an issue's attachment
func UploadAttachment(ctx *context.Context) {
	if !setting.Attachment.Enabled {
		ctx.Error(404, "attachment is not enabled")
		return
	}

	file, header, err := ctx.Req.FormFile("file")
	if err != nil {
		ctx.Error(500, fmt.Sprintf("FormFile: %v", err))
		return
	}
	defer file.Close()

	// Sniff the first 1 KiB to verify the content type is allowed.
	buf := make([]byte, 1024)
	n, _ := file.Read(buf)
	if n > 0 {
		buf = buf[:n]
	}
	err = upload.VerifyAllowedContentType(buf, strings.Split(setting.Attachment.AllowedTypes, ","))
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}

	datasetID, _ := strconv.ParseInt(ctx.Req.FormValue("dataset_id"), 10, 64)
	attach, err := models.NewAttachment(&models.Attachment{
		IsPrivate:  true,
		UploaderID: ctx.User.ID,
		Name:       header.Filename,
		DatasetID:  datasetID,
	}, buf, file)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("NewAttachment: %v", err))
		return
	}

	log.Trace("New attachment uploaded: %s", attach.UUID)
	ctx.JSON(200, map[string]string{
		"uuid": attach.UUID,
	})
}

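// Illustrative note, not part of the original file: UploadAttachment expects a
// multipart form with a "file" field and an optional "dataset_id" value, checks
// the sniffed content type against setting.Attachment.AllowedTypes, and replies
// with {"uuid": "<attachment uuid>"}. The route this handler is mounted on is
// registered elsewhere in the router and is not shown here.
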
func UpdatePublicAttachment(ctx *context.Context) {
	file := ctx.Query("file")
	isPrivate, _ := strconv.ParseBool(ctx.Query("is_private"))

	attach, err := models.GetAttachmentByUUID(file)
	if err != nil {
		ctx.Error(404, err.Error())
		return
	}

	attach.IsPrivate = isPrivate
	models.UpdateAttachment(attach)
}

// DeleteAttachment handles deleting an issue's attachment
func DeleteAttachment(ctx *context.Context) {
	file := ctx.Query("file")
	attach, err := models.GetAttachmentByUUID(file)
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}

	// issue 214: mod del-dataset permission
	if !models.CanDelAttachment(ctx.IsSigned, ctx.User, attach) {
		ctx.Error(403)
		return
	}

	err = models.DeleteAttachment(attach, true)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("DeleteAttachment: %v", err))
		return
	}

	attachjson, _ := json.Marshal(attach)
	labelmsg.SendDeleteAttachToLabelSys(string(attachjson))

	DeleteAllUnzipFile(attach, "")

	_, err = models.DeleteFileChunkById(attach.UUID)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("DeleteFileChunkById: %v", err))
		return
	}

	ctx.JSON(200, map[string]string{
		"uuid": attach.UUID,
	})
}

func DownloadUserIsOrgOrCollaboration(ctx *context.Context, attach *models.Attachment) bool {
	dataset, err := models.GetDatasetByID(attach.DatasetID)
	if err != nil {
		log.Info("query dataset error")
	} else {
		repo, err := models.GetRepositoryByID(dataset.RepoID)
		if err != nil {
			log.Info("query repo error.")
		} else {
			repo.GetOwner()
			if ctx.User != nil {
				if repo.Owner.IsOrganization() {
					if repo.Owner.IsUserPartOfOrg(ctx.User.ID) {
						log.Info("org user may visit the attach.")
						return true
					}
				}

				isCollaborator, _ := repo.IsCollaborator(ctx.User.ID)
				if isCollaborator {
					log.Info("Collaborator user may visit the attach.")
					return true
				}
			}
		}
	}

	return false
}

// GetAttachment serves attachments
func GetAttachment(ctx *context.Context) {
	typeCloudBrain := ctx.QueryInt("type")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	attach, err := models.GetAttachmentByUUID(ctx.Params(":uuid"))
	if err != nil {
		if models.IsErrAttachmentNotExist(err) {
			ctx.Error(404)
		} else {
			ctx.ServerError("GetAttachmentByUUID", err)
		}
		return
	}

	repository, unitType, err := attach.LinkedRepository()
	if err != nil {
		ctx.ServerError("LinkedRepository", err)
		return
	}

	dataSet, err := attach.LinkedDataSet()
	if err != nil {
		ctx.ServerError("LinkedDataSet", err)
		return
	}

	if repository == nil && dataSet != nil {
		repository, _ = models.GetRepositoryByID(dataSet.RepoID)
		unitType = models.UnitTypeDatasets
	}

	if repository == nil { // If not linked
		//if !(ctx.IsSigned && attach.UploaderID == ctx.User.ID) && attach.IsPrivate { //We block if not the uploader
		//log.Info("ctx.IsSigned =" + fmt.Sprintf("%v", ctx.IsSigned))
		if !(ctx.IsSigned && attach.UploaderID == ctx.User.ID) && attach.IsPrivate && !DownloadUserIsOrgOrCollaboration(ctx, attach) { // We block if not the uploader
			ctx.Error(http.StatusNotFound)
			return
		}
	} else { // If we have the repository we check access
		perm, errPermission := models.GetUserRepoPermission(repository, ctx.User)
		if errPermission != nil {
			ctx.Error(http.StatusInternalServerError, "GetUserRepoPermission", errPermission.Error())
			return
		}
		if !perm.CanRead(unitType) {
			ctx.Error(http.StatusNotFound)
			return
		}
	}

	if dataSet != nil {
		isPermit, err := models.GetUserDataSetPermission(dataSet, ctx.User)
		if err != nil {
			ctx.Error(http.StatusInternalServerError, "GetUserDataSetPermission", err.Error())
			return
		}
		if !isPermit {
			ctx.Error(http.StatusNotFound)
			return
		}
	}

	// If we have matched and have access to the release or issue
	if setting.Attachment.StoreType == storage.MinioStorageType {
		url := ""
		if typeCloudBrain == models.TypeCloudBrainOne {
			url, err = storage.Attachments.PresignedGetURL(setting.Attachment.Minio.BasePath+attach.RelativePath(), attach.Name)
			if err != nil {
				ctx.ServerError("PresignedGetURL", err)
				return
			}
		} else {
			if setting.PROXYURL != "" {
				url = setting.PROXYURL + "/obs_proxy_download?uuid=" + attach.UUID + "&file_name=" + attach.Name
				log.Info("return url=" + url)
			} else {
				url, err = storage.ObsGetPreSignedUrl(attach.UUID, attach.Name)
				if err != nil {
					ctx.ServerError("ObsGetPreSignedUrl", err)
					return
				}
			}
		}

		if err = increaseDownloadCount(attach, dataSet); err != nil {
			ctx.ServerError("Update", err)
			return
		}

		if dataSet != nil {
			http.Redirect(ctx.Resp, ctx.Req.Request, url, http.StatusMovedPermanently)
		} else {
			fr, err := storage.Attachments.Open(attach.RelativePath())
			if err != nil {
				ctx.ServerError("Open", err)
				return
			}
			defer fr.Close()

			if err = ServeData(ctx, attach.Name, fr); err != nil {
				ctx.ServerError("ServeData", err)
				return
			}
		}
	} else {
		fr, err := storage.Attachments.Open(attach.RelativePath())
		if err != nil {
			ctx.ServerError("Open", err)
			return
		}
		defer fr.Close()

		if err = increaseDownloadCount(attach, dataSet); err != nil {
			ctx.ServerError("Update", err)
			return
		}

		if err = ServeData(ctx, attach.Name, fr); err != nil {
			ctx.ServerError("ServeData", err)
			return
		}
	}
}

func increaseDownloadCount(attach *models.Attachment, dataSet *models.Dataset) error {
	if err := attach.IncreaseDownloadCount(); err != nil {
		return err
	}

	if dataSet != nil {
		if err := models.IncreaseDownloadCount(dataSet.ID); err != nil {
			return err
		}
	}

	return nil
}

// GetPresignedPutObjectURL gets a presigned url for putting an object
func GetPresignedPutObjectURL(ctx *context.Context) {
	if !setting.Attachment.Enabled {
		ctx.Error(404, "attachment is not enabled")
		return
	}

	err := upload.VerifyFileType(ctx.Params("file_type"), strings.Split(setting.Attachment.AllowedTypes, ","))
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}

	if setting.Attachment.StoreType == storage.MinioStorageType {
		uuid := gouuid.NewV4().String()
		url, err := storage.Attachments.PresignedPutURL(models.AttachmentRelativePath(uuid))
		if err != nil {
			ctx.ServerError("PresignedPutURL", err)
			return
		}
		ctx.JSON(200, map[string]string{
			"uuid": uuid,
			"url":  url,
		})
	} else {
		ctx.Error(404, "storage type is not enabled")
		return
	}
}

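// putObjectSketch is an illustrative sketch, not part of the original file. It
// shows how a client could combine GetPresignedPutObjectURL and AddAttachment
// for a direct-to-MinIO upload. The route paths below are hypothetical
// placeholders; only the JSON shape ({"uuid": ..., "url": ...}) and the query
// parameter names come from the handlers in this file.
func putObjectSketch(serverURL, fileName, content string) error {
	// Step 1: ask the server for an upload UUID and a presigned PUT URL
	// (hypothetical route for GetPresignedPutObjectURL).
	resp, err := http.Get(serverURL + "/attachments/get_pre_url")
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	var out map[string]string
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		return err
	}

	// Step 2: PUT the object body directly to the presigned MinIO URL.
	req, err := http.NewRequest(http.MethodPut, out["url"], strings.NewReader(content))
	if err != nil {
		return err
	}
	putResp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	putResp.Body.Close()

	// Step 3: record the uploaded object as an attachment (hypothetical route
	// for AddAttachment; "type=0" assumes models.TypeCloudBrainOne is 0).
	regResp, err := http.Get(serverURL + "/attachments/add?type=0&uuid=" + out["uuid"] +
		"&file_name=" + fileName + "&size=" + strconv.Itoa(len(content)))
	if err != nil {
		return err
	}
	return regResp.Body.Close()
}
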
// AddAttachment handles adding an attachment record for an already uploaded object
func AddAttachment(ctx *context.Context) {
	typeCloudBrain := ctx.QueryInt("type")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	uuid := ctx.Query("uuid")
	has := false
	if typeCloudBrain == models.TypeCloudBrainOne {
		has, err = storage.Attachments.HasObject(models.AttachmentRelativePath(uuid))
		if err != nil {
			ctx.ServerError("HasObject", err)
			return
		}
	} else {
		has, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(uuid) + "/" + uuid)
		if err != nil {
			ctx.ServerError("ObsHasObject", err)
			return
		}
	}

	if !has {
		ctx.Error(404, "attachment has not been uploaded")
		return
	}

	attachment, err := models.InsertAttachment(&models.Attachment{
		UUID:       uuid,
		UploaderID: ctx.User.ID,
		IsPrivate:  true,
		Name:       ctx.Query("file_name"),
		Size:       ctx.QueryInt64("size"),
		DatasetID:  ctx.QueryInt64("dataset_id"),
		Type:       typeCloudBrain,
	})
	if err != nil {
		ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
		return
	}

	if attachment.DatasetID != 0 {
		if isCanDecompress(attachment.Name) {
			if typeCloudBrain == models.TypeCloudBrainOne {
				err = worker.SendDecompressTask(contexExt.Background(), uuid, attachment.Name)
				if err != nil {
					log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
				} else {
					attachment.DecompressState = models.DecompressStateIng
					err = models.UpdateAttachment(attachment)
					if err != nil {
						log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error())
					}
				}
			}
			// TODO: decompress type_two
		}
	}

	ctx.JSON(200, map[string]string{
		"result_code": "0",
	})
}

func isCanDecompress(name string) bool {
	if strings.HasSuffix(name, ".zip") || strings.HasSuffix(name, ".tar.gz") || strings.HasSuffix(name, ".tgz") {
		return true
	}
	return false
}

func UpdateAttachmentDecompressState(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	result := ctx.Query("result")

	attach, err := models.GetAttachmentByUUID(uuid)
	if err != nil {
		log.Error("GetAttachmentByUUID(%s) failed:%s", uuid, err.Error())
		return
	}

	if result == DecompressSuccess {
		attach.DecompressState = models.DecompressStateDone
	} else if result == DecompressFailed {
		attach.DecompressState = models.DecompressStateFailed
	} else {
		log.Error("result is error: %s", result)
		return
	}

	err = models.UpdateAttachment(attach)
	if err != nil {
		log.Error("UpdateAttachment(%s) failed:%s", uuid, err.Error())
		return
	}

	log.Info("start to send msg to labelsystem")
	dataset, _ := models.GetDatasetByID(attach.DatasetID)

	labelMap := make(map[string]string)
	labelMap["UUID"] = uuid
	labelMap["Type"] = fmt.Sprint(attach.Type)
	labelMap["UploaderID"] = fmt.Sprint(attach.UploaderID)
	labelMap["RepoID"] = fmt.Sprint(dataset.RepoID)
	labelMap["AttachName"] = attach.Name
	attachjson, _ := json.Marshal(labelMap)
	labelmsg.SendAddAttachToLabelSys(string(attachjson))

	log.Info("end to send msg to labelsystem")
	ctx.JSON(200, map[string]string{
		"result_code": "0",
	})
}

func GetSuccessChunks(ctx *context.Context) {
	fileMD5 := ctx.Query("md5")
	typeCloudBrain := ctx.QueryInt("type")
	var chunks string

	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	fileChunk, err := models.GetFileChunkByMD5AndUser(fileMD5, ctx.User.ID, typeCloudBrain)
	if err != nil {
		if models.IsErrFileChunkNotExist(err) {
			ctx.JSON(200, map[string]string{
				"uuid":     "",
				"uploaded": "0",
				"uploadID": "",
				"chunks":   "",
			})
		} else {
			ctx.ServerError("GetFileChunkByMD5", err)
		}
		return
	}

	isExist := false
	if typeCloudBrain == models.TypeCloudBrainOne {
		isExist, err = storage.Attachments.HasObject(models.AttachmentRelativePath(fileChunk.UUID))
		if err != nil {
			ctx.ServerError("HasObject failed", err)
			return
		}
	} else {
		isExist, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(fileChunk.UUID) + "/" + fileChunk.UUID)
		if err != nil {
			ctx.ServerError("ObsHasObject failed", err)
			return
		}
	}

	if isExist {
		if fileChunk.IsUploaded == models.FileNotUploaded {
			log.Info("the file has been uploaded but not recorded")
			fileChunk.IsUploaded = models.FileUploaded
			if err = models.UpdateFileChunk(fileChunk); err != nil {
				log.Error("UpdateFileChunk failed: %s", err.Error())
			}
		}
	} else {
		if fileChunk.IsUploaded == models.FileUploaded {
			log.Info("the file has been recorded but not uploaded")
			fileChunk.IsUploaded = models.FileNotUploaded
			if err = models.UpdateFileChunk(fileChunk); err != nil {
				log.Error("UpdateFileChunk failed: %s", err.Error())
			}
		}

		if typeCloudBrain == models.TypeCloudBrainOne {
			chunks, err = storage.GetPartInfos(fileChunk.UUID, fileChunk.UploadID)
			if err != nil {
				log.Error("GetPartInfos failed:%v", err.Error())
			}
		} else {
			chunks, err = storage.GetObsPartInfos(fileChunk.UUID, fileChunk.UploadID)
			if err != nil {
				log.Error("GetObsPartInfos failed:%v", err.Error())
			}
		}

		if err != nil {
			models.DeleteFileChunk(fileChunk)
			ctx.JSON(200, map[string]string{
				"uuid":     "",
				"uploaded": "0",
				"uploadID": "",
				"chunks":   "",
			})
			return
		}
	}

	var attachID int64
	attach, err := models.GetAttachmentByUUID(fileChunk.UUID)
	if err != nil {
		if models.IsErrAttachmentNotExist(err) {
			attachID = 0
		} else {
			ctx.ServerError("GetAttachmentByUUID", err)
			return
		}
	} else {
		attachID = attach.ID
	}

	if attach == nil {
		ctx.JSON(200, map[string]string{
			"uuid":        fileChunk.UUID,
			"uploaded":    strconv.Itoa(fileChunk.IsUploaded),
			"uploadID":    fileChunk.UploadID,
			"chunks":      chunks,
			"attachID":    "0",
			"datasetID":   "0",
			"fileName":    "",
			"datasetName": "",
		})
		return
	}

	dataset, err := models.GetDatasetByID(attach.DatasetID)
	if err != nil {
		ctx.ServerError("GetDatasetByID", err)
		return
	}

	ctx.JSON(200, map[string]string{
		"uuid":        fileChunk.UUID,
		"uploaded":    strconv.Itoa(fileChunk.IsUploaded),
		"uploadID":    fileChunk.UploadID,
		"chunks":      chunks,
		"attachID":    strconv.Itoa(int(attachID)),
		"datasetID":   strconv.Itoa(int(attach.DatasetID)),
		"fileName":    attach.Name,
		"datasetName": dataset.Title,
	})
}

func NewMultipart(ctx *context.Context) {
	if !setting.Attachment.Enabled {
		ctx.Error(404, "attachment is not enabled")
		return
	}

	err := upload.VerifyFileType(ctx.Query("fileType"), strings.Split(setting.Attachment.AllowedTypes, ","))
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}

	typeCloudBrain := ctx.QueryInt("type")
	err = checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	fileName := ctx.Query("file_name")

	if setting.Attachment.StoreType == storage.MinioStorageType {
		totalChunkCounts := ctx.QueryInt("totalChunkCounts")
		if totalChunkCounts > minio_ext.MaxPartsCount {
			ctx.Error(400, fmt.Sprintf("chunk counts(%d) is too much", totalChunkCounts))
			return
		}

		fileSize := ctx.QueryInt64("size")
		if fileSize > minio_ext.MaxMultipartPutObjectSize {
			ctx.Error(400, fmt.Sprintf("file size(%d) is too big", fileSize))
			return
		}

		uuid := gouuid.NewV4().String()
		var uploadID string
		if typeCloudBrain == models.TypeCloudBrainOne {
			uploadID, err = storage.NewMultiPartUpload(uuid)
			if err != nil {
				ctx.ServerError("NewMultipart", err)
				return
			}
		} else {
			uploadID, err = storage.NewObsMultiPartUpload(uuid, fileName)
			if err != nil {
				ctx.ServerError("NewObsMultiPartUpload", err)
				return
			}
		}

		_, err = models.InsertFileChunk(&models.FileChunk{
			UUID:        uuid,
			UserID:      ctx.User.ID,
			UploadID:    uploadID,
			Md5:         ctx.Query("md5"),
			Size:        fileSize,
			TotalChunks: totalChunkCounts,
			Type:        typeCloudBrain,
		})
		if err != nil {
			ctx.Error(500, fmt.Sprintf("InsertFileChunk: %v", err))
			return
		}

		ctx.JSON(200, map[string]string{
			"uuid":     uuid,
			"uploadID": uploadID,
		})
	} else {
		ctx.Error(404, "storage type is not enabled")
		return
	}
}

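// Illustrative note, not part of the original file: the chunked-upload flow
// implied by the handlers in this file is:
//
//	1. GetSuccessChunks      - look up an existing FileChunk by md5 so an interrupted upload can resume;
//	2. NewMultipart          - create a FileChunk record and a backend multipart upload, returning uuid + uploadID;
//	3. GetMultipartUploadUrl - obtain a presigned (or proxy) URL for each chunk, to which the client uploads the part;
//	4. UpdateMultipart       - record the chunk's etag in FileChunk.CompletedParts after each successful part upload;
//	5. CompleteMultipart     - finish the backend multipart upload, mark the FileChunk uploaded and insert the Attachment.
//
// This ordering is an inference from the handlers themselves; the routes that
// expose them are registered elsewhere.
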
func PutOBSProxyUpload(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	uploadID := ctx.Query("uploadId")
	partNumber := ctx.QueryInt("partNumber")
	fileName := ctx.Query("file_name")

	RequestBody := ctx.Req.Body()
	if RequestBody == nil {
		ctx.Error(500, "request body is nil")
		return
	}

	err := storage.ObsMultiPartUpload(uuid, uploadID, partNumber, fileName, RequestBody.ReadCloser())
	if err != nil {
		log.Info("upload error: %v", err)
	}
}

func GetOBSProxyDownload(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	fileName := ctx.Query("file_name")

	body, err := storage.ObsDownload(uuid, fileName)
	if err != nil {
		log.Info("download error: %v", err)
	} else {
		defer body.Close()

		ctx.Resp.Header().Set("Content-Disposition", "attachment; filename="+fileName)
		ctx.Resp.Header().Set("Content-Type", "application/octet-stream")

		p := make([]byte, 1024)
		var readErr error
		var readCount int
		// read the object content and stream it to the response
		for {
			readCount, readErr = body.Read(p)
			if readCount > 0 {
				ctx.Resp.Write(p[:readCount])
				//fmt.Printf("%s", p[:readCount])
			}
			if readErr != nil {
				break
			}
		}
	}
}

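// Note, not part of the original file: the read loop above is a manual stream
// copy that ends when Read reports an error (including io.EOF at the end of the
// object); io.Copy(ctx.Resp, body) would be an equivalent, more idiomatic way to
// write it, assuming no per-chunk processing is needed.
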
func GetMultipartUploadUrl(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	uploadID := ctx.Query("uploadID")
	partNumber := ctx.QueryInt("chunkNumber")
	size := ctx.QueryInt64("size")
	fileName := ctx.Query("file_name")

	typeCloudBrain := ctx.QueryInt("type")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	url := ""
	if typeCloudBrain == models.TypeCloudBrainOne {
		if size > minio_ext.MinPartSize {
			ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size))
			return
		}

		url, err = storage.GenMultiPartSignedUrl(uuid, uploadID, partNumber, size)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err))
			return
		}
	} else {
		if setting.PROXYURL != "" {
			url = setting.PROXYURL + "/obs_proxy_multipart?uuid=" + uuid + "&uploadId=" + uploadID + "&partNumber=" + fmt.Sprint(partNumber) + "&file_name=" + fileName
			log.Info("return url=" + url)
		} else {
			url, err = storage.ObsGenMultiPartSignedUrl(uuid, uploadID, partNumber, fileName)
			if err != nil {
				ctx.Error(500, fmt.Sprintf("ObsGenMultiPartSignedUrl failed: %v", err))
				return
			}
			log.Info("url=" + url)
		}
	}

	ctx.JSON(200, map[string]string{
		"url": url,
	})
}

func GetObsKey(ctx *context.Context) {
	uuid := gouuid.NewV4().String()
	key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/")

	ctx.JSON(200, map[string]string{
		"uuid":              uuid,
		"key":               key,
		"access_key_id":     setting.AccessKeyID,
		"secret_access_key": setting.SecretAccessKey,
		"server":            setting.Endpoint,
		"bucket":            setting.Bucket,
	})
}

func CompleteMultipart(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	uploadID := ctx.Query("uploadID")
	typeCloudBrain := ctx.QueryInt("type")
	fileName := ctx.Query("file_name")

	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	fileChunk, err := models.GetFileChunkByUUID(uuid)
	if err != nil {
		if models.IsErrFileChunkNotExist(err) {
			ctx.Error(404)
		} else {
			ctx.ServerError("GetFileChunkByUUID", err)
		}
		return
	}

	if typeCloudBrain == models.TypeCloudBrainOne {
		_, err = storage.CompleteMultiPartUpload(uuid, uploadID)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err))
			return
		}
	} else {
		err = storage.CompleteObsMultiPartUpload(uuid, uploadID, fileName)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("CompleteObsMultiPartUpload failed: %v", err))
			return
		}
	}

	fileChunk.IsUploaded = models.FileUploaded

	err = models.UpdateFileChunk(fileChunk)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
		return
	}

	attachment, err := models.InsertAttachment(&models.Attachment{
		UUID:       uuid,
		UploaderID: ctx.User.ID,
		IsPrivate:  true,
		Name:       fileName,
		Size:       ctx.QueryInt64("size"),
		DatasetID:  ctx.QueryInt64("dataset_id"),
		Type:       typeCloudBrain,
	})
	if err != nil {
		ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
		return
	}

	if attachment.DatasetID != 0 {
		if isCanDecompress(attachment.Name) {
			if typeCloudBrain == models.TypeCloudBrainOne {
				err = worker.SendDecompressTask(contexExt.Background(), uuid, attachment.Name)
				if err != nil {
					log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
				} else {
					attachment.DecompressState = models.DecompressStateIng
					err = models.UpdateAttachment(attachment)
					if err != nil {
						log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error())
					}
				}
			}
			if typeCloudBrain == models.TypeCloudBrainTwo {
				attachjson, _ := json.Marshal(attachment)
				labelmsg.SendDecompressAttachToLabelOBS(string(attachjson))
			}
		} else {
			dataset, _ := models.GetDatasetByID(attachment.DatasetID)
			labelMap := make(map[string]string)
			labelMap["UUID"] = uuid
			labelMap["Type"] = fmt.Sprint(attachment.Type)
			labelMap["UploaderID"] = fmt.Sprint(attachment.UploaderID)
			labelMap["RepoID"] = fmt.Sprint(dataset.RepoID)
			labelMap["AttachName"] = attachment.Name
			attachjson, _ := json.Marshal(labelMap)
			labelmsg.SendAddAttachToLabelSys(string(attachjson))
		}
	}

	ctx.JSON(200, map[string]string{
		"result_code": "0",
	})
}

func UpdateMultipart(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	partNumber := ctx.QueryInt("chunkNumber")
	etag := ctx.Query("etag")

	fileChunk, err := models.GetFileChunkByUUID(uuid)
	if err != nil {
		if models.IsErrFileChunkNotExist(err) {
			ctx.Error(404)
		} else {
			ctx.ServerError("GetFileChunkByUUID", err)
		}
		return
	}

	fileChunk.CompletedParts = append(fileChunk.CompletedParts, strconv.Itoa(partNumber)+"-"+strings.Replace(etag, "\"", "", -1))

	err = models.UpdateFileChunk(fileChunk)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
		return
	}

	ctx.JSON(200, map[string]string{
		"result_code": "0",
	})
}

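// Note, not part of the original file: UpdateMultipart records each finished
// chunk as a "partNumber-etag" string (with the quotes stripped from the etag)
// in FileChunk.CompletedParts. How those entries are consumed when the upload
// is finally assembled lives in the storage package, outside this file.
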
func HandleUnDecompressAttachment() {
	attachs, err := models.GetUnDecompressAttachments()
	if err != nil {
		log.Error("GetUnDecompressAttachments failed: %s", err.Error())
		return
	}

	for _, attach := range attachs {
		if attach.Type == models.TypeCloudBrainOne {
			err = worker.SendDecompressTask(contexExt.Background(), attach.UUID, attach.Name)
			if err != nil {
				log.Error("SendDecompressTask(%s) failed:%s", attach.UUID, err.Error())
			} else {
				attach.DecompressState = models.DecompressStateIng
				err = models.UpdateAttachment(attach)
				if err != nil {
					log.Error("UpdateAttachment state(%s) failed:%s", attach.UUID, err.Error())
				}
			}
		} else if attach.Type == models.TypeCloudBrainTwo {
			attachjson, _ := json.Marshal(attach)
			labelmsg.SendDecompressAttachToLabelOBS(string(attachjson))
		}
	}
}

func QueryAllPublicDataset(ctx *context.Context) {
	attachs, err := models.GetAllPublicAttachments()
	if err != nil {
		ctx.JSON(200, map[string]string{
			"result_code": "-1",
			"error_msg":   err.Error(),
			"data":        "",
		})
		return
	}

	queryDatasets(ctx, attachs)
}

func QueryPrivateDataset(ctx *context.Context) {
	username := ctx.Params(":username")
	attachs, err := models.GetPrivateAttachments(username)
	if err != nil {
		ctx.JSON(200, map[string]string{
			"result_code": "-1",
			"error_msg":   err.Error(),
			"data":        "",
		})
		return
	}

	for _, attach := range attachs {
		attach.Name = username
	}

	queryDatasets(ctx, attachs)
}

func queryDatasets(ctx *context.Context, attachs []*models.AttachmentUsername) {
	var datasets []CloudBrainDataset
	if len(attachs) == 0 {
		log.Info("dataset is null")
		ctx.JSON(200, map[string]string{
			"result_code": "0",
			"error_msg":   "",
			"data":        "",
		})
		return
	}

	for _, attch := range attachs {
		has, err := storage.Attachments.HasObject(models.AttachmentRelativePath(attch.UUID))
		if err != nil || !has {
			continue
		}

		datasets = append(datasets, CloudBrainDataset{
			strconv.FormatInt(attch.ID, 10),
			attch.Attachment.Name,
			setting.Attachment.Minio.RealPath +
				setting.Attachment.Minio.Bucket + "/" +
				setting.Attachment.Minio.BasePath +
				models.AttachmentRelativePath(attch.UUID) +
				attch.UUID,
			attch.Name,
			attch.CreatedUnix.Format("2006-01-02 03:04:05 PM"),
		})
	}

	data, err := json.Marshal(datasets)
	if err != nil {
		log.Error("json.Marshal failed: %s", err.Error())
		ctx.JSON(200, map[string]string{
			"result_code": "-1",
			"error_msg":   err.Error(),
			"data":        "",
		})
		return
	}

	ctx.JSON(200, map[string]string{
		"result_code": "0",
		"error_msg":   "",
		"data":        string(data),
	})
}

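// Illustrative note, not part of the original file: the "data" field returned
// by queryDatasets is a JSON-encoded array of CloudBrainDataset, for example
// (values are made up for illustration):
//
//	[{"id":"42","name":"train.zip","place":"<minio real path><bucket>/<base path><attachment relative path><uuid>","provider":"someuser","created_at":"2006-01-02 03:04:05 PM"}]
//
// The field names come from the CloudBrainDataset struct tags at the top of this file.
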
func checkTypeCloudBrain(typeCloudBrain int) error {
	if typeCloudBrain != models.TypeCloudBrainOne && typeCloudBrain != models.TypeCloudBrainTwo {
		log.Error("type error: %d", typeCloudBrain)
		return errors.New("type error")
	}
	return nil
}