You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

attachment.go 28 kB

4 years ago
4 years ago
4 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
3 years ago
3 years ago
3 years ago
4 years ago
5 years ago
4 years ago
4 years ago
4 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
3 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
4 years ago
3 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
5 years ago
4 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
3 years ago
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
3 years ago
4 years ago
4 years ago
4 years ago
3 years ago
4 years ago
4 years ago
3 years ago
4 years ago
3 years ago
4 years ago
4 years ago
4 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
4 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
5 years ago
4 years ago
4 years ago
3 years ago
4 years ago
5 years ago
4 years ago
4 years ago
5 years ago
3 years ago
3 years ago
4 years ago
3 years ago
5 years ago
3 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
3 years ago
4 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058
  1. // Copyright 2017 The Gitea Authors. All rights reserved.
  2. // Use of this source code is governed by a MIT-style
  3. // license that can be found in the LICENSE file.
  4. package repo
import (
	contexExt "context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"mime/multipart"
	"net/http"
	"path"
	"strconv"
	"strings"

	"code.gitea.io/gitea/models"
	"code.gitea.io/gitea/modules/base"
	"code.gitea.io/gitea/modules/context"
	"code.gitea.io/gitea/modules/labelmsg"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/minio_ext"
	"code.gitea.io/gitea/modules/notification"
	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/storage"
	"code.gitea.io/gitea/modules/upload"
	"code.gitea.io/gitea/modules/worker"

	gouuid "github.com/satori/go.uuid"
)
const (
	// Result codes the decompress worker reports back via
	// UpdateAttachmentDecompressState's "result" query parameter.
	DecompressSuccess = "0"
	DecompressFailed  = "1"

	// tplAttachmentUpload is the template for the dataset attachment upload page.
	tplAttachmentUpload base.TplName = "repo/attachment/upload"
)
// CloudBrainDataset is the JSON shape in which a dataset attachment is
// exposed to the CloudBrain service (note the renamed JSON keys).
type CloudBrainDataset struct {
	UUID       string `json:"id"`
	Name       string `json:"name"`
	Path       string `json:"place"`
	UserName   string `json:"provider"`
	CreateTime string `json:"created_at"`
}
// UploadForm holds the form fields of a single chunked-upload part request.
type UploadForm struct {
	UploadID   string         `form:"uploadId"`
	UuID       string         `form:"uuid"`
	PartSize   int64          `form:"size"`
	Offset     int64          `form:"offset"`
	PartNumber int            `form:"chunkNumber"`
	PartFile   multipart.File `form:"file"`
}
// RenderAttachmentSettings is the exported wrapper around
// renderAttachmentSettings for use from other packages.
func RenderAttachmentSettings(ctx *context.Context) {
	renderAttachmentSettings(ctx)
}
// renderAttachmentSettings copies the attachment configuration into
// ctx.Data so templates can read the enable flag, storage type, allowed
// content types and upload limits.
func renderAttachmentSettings(ctx *context.Context) {
	ctx.Data["IsAttachmentEnabled"] = setting.Attachment.Enabled
	ctx.Data["AttachmentStoreType"] = setting.Attachment.StoreType
	ctx.Data["AttachmentAllowedTypes"] = setting.Attachment.AllowedTypes
	ctx.Data["AttachmentMaxSize"] = setting.Attachment.MaxSize
	ctx.Data["AttachmentMaxFiles"] = setting.Attachment.MaxFiles
}
  59. func UploadAttachmentUI(ctx *context.Context) {
  60. ctx.Data["datasetId"] = ctx.Query("datasetId")
  61. dataset, _ := models.GetDatasetByID(ctx.QueryInt64("datasetId"))
  62. if dataset == nil {
  63. ctx.Error(404, "The dataset does not exits.")
  64. }
  65. r, _ := models.GetRepositoryByID(dataset.RepoID)
  66. ctx.Data["Repo"] = r
  67. ctx.HTML(200, tplAttachmentUpload)
  68. }
// UploadAttachment response for uploading issue's attachment
func UploadAttachment(ctx *context.Context) {
	if !setting.Attachment.Enabled {
		ctx.Error(404, "attachment is not enabled")
		return
	}

	file, header, err := ctx.Req.FormFile("file")
	if err != nil {
		ctx.Error(500, fmt.Sprintf("FormFile: %v", err))
		return
	}
	defer file.Close()

	// Sniff the first 1 KiB of the upload so the content type can be
	// validated against the configured allow-list.
	buf := make([]byte, 1024)
	n, _ := file.Read(buf)
	if n > 0 {
		buf = buf[:n]
	}
	err = upload.VerifyAllowedContentType(buf, strings.Split(setting.Attachment.AllowedTypes, ","))
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}

	// A missing/malformed dataset_id parses to 0 (no dataset association).
	datasetID, _ := strconv.ParseInt(ctx.Req.FormValue("dataset_id"), 10, 64)
	// buf (the already-read prefix) plus file (the remainder) together
	// form the full content persisted by NewAttachment.
	attach, err := models.NewAttachment(&models.Attachment{
		IsPrivate:  true,
		UploaderID: ctx.User.ID,
		Name:       header.Filename,
		DatasetID:  datasetID,
	}, buf, file)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("NewAttachment: %v", err))
		return
	}

	log.Trace("New attachment uploaded: %s", attach.UUID)
	ctx.JSON(200, map[string]string{
		"uuid": attach.UUID,
	})
}
  107. func UpdatePublicAttachment(ctx *context.Context) {
  108. file := ctx.Query("file")
  109. isPrivate, _ := strconv.ParseBool(ctx.Query("is_private"))
  110. attach, err := models.GetAttachmentByUUID(file)
  111. if err != nil {
  112. ctx.Error(404, err.Error())
  113. return
  114. }
  115. attach.IsPrivate = isPrivate
  116. models.UpdateAttachment(attach)
  117. }
// DeleteAttachment response for deleting issue's attachment
func DeleteAttachment(ctx *context.Context) {
	file := ctx.Query("file")
	attach, err := models.GetAttachmentByUUID(file)
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}
	//issue 214: mod del-dataset permission
	if !models.CanDelAttachment(ctx.IsSigned, ctx.User, attach) {
		ctx.Error(403)
		return
	}
	err = models.DeleteAttachment(attach, true)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("DeleteAttachment: %v", err))
		return
	}
	// Tell the label system the attachment is gone, then remove any
	// decompressed copies and the chunk-upload bookkeeping row.
	attachjson, _ := json.Marshal(attach)
	labelmsg.SendDeleteAttachToLabelSys(string(attachjson))
	DeleteAllUnzipFile(attach, "")
	_, err = models.DeleteFileChunkById(attach.UUID)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("DeleteFileChunkById: %v", err))
		return
	}
	ctx.JSON(200, map[string]string{
		"uuid": attach.UUID,
	})
}
  148. func DownloadUserIsOrgOrCollaboration(ctx *context.Context, attach *models.Attachment) bool {
  149. dataset, err := models.GetDatasetByID(attach.DatasetID)
  150. if err != nil {
  151. log.Info("query dataset error")
  152. } else {
  153. repo, err := models.GetRepositoryByID(dataset.RepoID)
  154. if err != nil {
  155. log.Info("query repo error.")
  156. } else {
  157. repo.GetOwner()
  158. if ctx.User != nil {
  159. if repo.Owner.IsOrganization() {
  160. if repo.Owner.IsUserPartOfOrg(ctx.User.ID) {
  161. log.Info("org user may visit the attach.")
  162. return true
  163. }
  164. }
  165. isCollaborator, _ := repo.IsCollaborator(ctx.User.ID)
  166. if isCollaborator {
  167. log.Info("Collaborator user may visit the attach.")
  168. return true
  169. }
  170. }
  171. }
  172. }
  173. return false
  174. }
// GetAttachment serve attachements
// Access is granted when the caller can read the linked repository/dataset,
// or — for unlinked private files — when they are the uploader, an org
// member, or a collaborator of the owning repository.
func GetAttachment(ctx *context.Context) {
	typeCloudBrain := ctx.QueryInt("type")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	attach, err := models.GetAttachmentByUUID(ctx.Params(":uuid"))
	if err != nil {
		if models.IsErrAttachmentNotExist(err) {
			ctx.Error(404)
		} else {
			ctx.ServerError("GetAttachmentByUUID", err)
		}
		return
	}

	repository, unitType, err := attach.LinkedRepository()
	if err != nil {
		ctx.ServerError("LinkedRepository", err)
		return
	}
	dataSet, err := attach.LinkedDataSet()
	if err != nil {
		ctx.ServerError("LinkedDataSet", err)
		return
	}
	// Attachment reachable only through a dataset: resolve the repository
	// from the dataset and check against the datasets unit.
	if repository == nil && dataSet != nil {
		repository, _ = models.GetRepositoryByID(dataSet.RepoID)
		unitType = models.UnitTypeDatasets
	}

	if repository == nil { //If not linked
		//if !(ctx.IsSigned && attach.UploaderID == ctx.User.ID) && attach.IsPrivate { //We block if not the uploader
		//log.Info("ctx.IsSigned =" + fmt.Sprintf("%v", ctx.IsSigned))
		if !(ctx.IsSigned && attach.UploaderID == ctx.User.ID) && attach.IsPrivate && !DownloadUserIsOrgOrCollaboration(ctx, attach) { //We block if not the uploader
			ctx.Error(http.StatusNotFound)
			return
		}
	} else { //If we have the repository we check access
		perm, errPermission := models.GetUserRepoPermission(repository, ctx.User)
		if errPermission != nil {
			ctx.Error(http.StatusInternalServerError, "GetUserRepoPermission", errPermission.Error())
			return
		}
		if !perm.CanRead(unitType) {
			ctx.Error(http.StatusNotFound)
			return
		}
	}

	if dataSet != nil {
		isPermit, err := models.GetUserDataSetPermission(dataSet, ctx.User)
		if err != nil {
			ctx.Error(http.StatusInternalServerError, "GetUserDataSetPermission", err.Error())
			return
		}
		if !isPermit {
			ctx.Error(http.StatusNotFound)
			return
		}
	}

	//If we have matched and access to release or issue
	if setting.Attachment.StoreType == storage.MinioStorageType {
		url := ""
		if typeCloudBrain == models.TypeCloudBrainOne {
			url, err = storage.Attachments.PresignedGetURL(setting.Attachment.Minio.BasePath+attach.RelativePath(), attach.Name)
			if err != nil {
				ctx.ServerError("PresignedGetURL", err)
				return
			}
		} else {
			// Non-CloudBrainOne files live in OBS: go through the proxy
			// when one is configured, otherwise hand out a presigned URL.
			if setting.PROXYURL != "" {
				url = setting.PROXYURL + "/obs_proxy_download?uuid=" + attach.UUID + "&file_name=" + attach.Name
				log.Info("return url=" + url)
			} else {
				url, err = storage.ObsGetPreSignedUrl(attach.UUID, attach.Name)
				if err != nil {
					ctx.ServerError("ObsGetPreSignedUrl", err)
					return
				}
			}
		}
		if err = increaseDownloadCount(attach, dataSet); err != nil {
			ctx.ServerError("Update", err)
			return
		}

		if dataSet != nil {
			// NOTE(review): 301 is cacheable by clients while the presigned
			// URL expires — confirm a temporary redirect wasn't intended.
			http.Redirect(ctx.Resp, ctx.Req.Request, url, http.StatusMovedPermanently)
		} else {
			fr, err := storage.Attachments.Open(attach.RelativePath())
			if err != nil {
				ctx.ServerError("Open", err)
				return
			}
			defer fr.Close()

			if err = ServeData(ctx, attach.Name, fr); err != nil {
				ctx.ServerError("ServeData", err)
				return
			}
		}
	} else {
		// Local storage: stream the file directly.
		fr, err := storage.Attachments.Open(attach.RelativePath())
		if err != nil {
			ctx.ServerError("Open", err)
			return
		}
		defer fr.Close()

		if err = increaseDownloadCount(attach, dataSet); err != nil {
			ctx.ServerError("Update", err)
			return
		}

		if err = ServeData(ctx, attach.Name, fr); err != nil {
			ctx.ServerError("ServeData", err)
			return
		}
	}
}
  291. func increaseDownloadCount(attach *models.Attachment, dataSet *models.Dataset) error {
  292. if err := attach.IncreaseDownloadCount(); err != nil {
  293. return err
  294. }
  295. if dataSet != nil {
  296. if err := models.IncreaseDownloadCount(dataSet.ID); err != nil {
  297. return err
  298. }
  299. }
  300. return nil
  301. }
// GetPresignedPutObjectURL returns a fresh UUID plus a presigned MinIO PUT
// URL so the client can upload the object directly to storage.
// Responds 404 when attachments (or the MinIO store type) are disabled and
// 400 when the file type is not in the allow-list.
func GetPresignedPutObjectURL(ctx *context.Context) {
	if !setting.Attachment.Enabled {
		ctx.Error(404, "attachment is not enabled")
		return
	}

	err := upload.VerifyFileType(ctx.Params("file_type"), strings.Split(setting.Attachment.AllowedTypes, ","))
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}

	if setting.Attachment.StoreType == storage.MinioStorageType {
		uuid := gouuid.NewV4().String()
		url, err := storage.Attachments.PresignedPutURL(models.AttachmentRelativePath(uuid))
		if err != nil {
			ctx.ServerError("PresignedPutURL", err)
			return
		}
		ctx.JSON(200, map[string]string{
			"uuid": uuid,
			"url":  url,
		})
	} else {
		ctx.Error(404, "storage type is not enabled")
		return
	}
}
// AddAttachment response for add attachment record
// The object itself must already exist in storage (uploaded out-of-band);
// this endpoint verifies that, creates the database record, and for archive
// files stored via CloudBrainOne queues a decompress task.
func AddAttachment(ctx *context.Context) {
	typeCloudBrain := ctx.QueryInt("type")
	fileName := ctx.Query("file_name")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	uuid := ctx.Query("uuid")
	has := false
	// Check the object really exists in the backing store for this type.
	if typeCloudBrain == models.TypeCloudBrainOne {
		has, err = storage.Attachments.HasObject(models.AttachmentRelativePath(uuid))
		if err != nil {
			ctx.ServerError("HasObject", err)
			return
		}
	} else {
		has, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(uuid) + "/" + fileName)
		if err != nil {
			ctx.ServerError("ObsHasObject", err)
			return
		}
	}
	if !has {
		ctx.Error(404, "attachment has not been uploaded")
		return
	}

	datasetId := ctx.QueryInt64("dataset_id")
	dataset, err := models.GetDatasetByID(datasetId)
	if err != nil {
		ctx.Error(404, "dataset does not exist.")
		return
	}

	// The attachment inherits the dataset's privacy.
	attachment, err := models.InsertAttachment(&models.Attachment{
		UUID:       uuid,
		UploaderID: ctx.User.ID,
		IsPrivate:  dataset.IsPrivate(),
		Name:       fileName,
		Size:       ctx.QueryInt64("size"),
		DatasetID:  ctx.QueryInt64("dataset_id"),
		Type:       typeCloudBrain,
	})
	if err != nil {
		ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
		return
	}

	if attachment.DatasetID != 0 {
		if isCanDecompress(attachment.Name) {
			if typeCloudBrain == models.TypeCloudBrainOne {
				err = worker.SendDecompressTask(contexExt.Background(), uuid, attachment.Name)
				if err != nil {
					// Best-effort: decompression failures are logged, not fatal.
					log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
				} else {
					attachment.DecompressState = models.DecompressStateIng
					err = models.UpdateAttachment(attachment)
					if err != nil {
						log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error())
					}
				}
			}
			//todo:decompress type_two
		}
	}

	ctx.JSON(200, map[string]string{
		"result_code": "0",
	})
}
  397. func isCanDecompress(name string) bool {
  398. if strings.HasSuffix(name, ".zip") || strings.HasSuffix(name, ".tar.gz") || strings.HasSuffix(name, ".tgz") {
  399. return true
  400. }
  401. return false
  402. }
  403. func UpdateAttachmentDecompressState(ctx *context.Context) {
  404. uuid := ctx.Query("uuid")
  405. result := ctx.Query("result")
  406. attach, err := models.GetAttachmentByUUID(uuid)
  407. if err != nil {
  408. log.Error("GetAttachmentByUUID(%s) failed:%s", uuid, err.Error())
  409. return
  410. }
  411. if result == DecompressSuccess {
  412. attach.DecompressState = models.DecompressStateDone
  413. } else if result == DecompressFailed {
  414. attach.DecompressState = models.DecompressStateFailed
  415. } else {
  416. log.Error("result is error:", result)
  417. return
  418. }
  419. err = models.UpdateAttachment(attach)
  420. if err != nil {
  421. log.Error("UpdateAttachment(%s) failed:%s", uuid, err.Error())
  422. return
  423. }
  424. log.Info("start to send msg to labelsystem ")
  425. dataset, _ := models.GetDatasetByID(attach.DatasetID)
  426. var labelMap map[string]string
  427. labelMap = make(map[string]string)
  428. labelMap["UUID"] = uuid
  429. labelMap["Type"] = fmt.Sprint(attach.Type)
  430. labelMap["UploaderID"] = fmt.Sprint(attach.UploaderID)
  431. labelMap["RepoID"] = fmt.Sprint(dataset.RepoID)
  432. labelMap["AttachName"] = attach.Name
  433. attachjson, _ := json.Marshal(labelMap)
  434. labelmsg.SendAddAttachToLabelSys(string(attachjson))
  435. log.Info("end to send msg to labelsystem ")
  436. ctx.JSON(200, map[string]string{
  437. "result_code": "0",
  438. })
  439. }
// GetSuccessChunks reports the resumable-upload state for a file identified
// by its MD5: the upload session id, which parts are already stored, and —
// when an attachment row already exists — its dataset metadata.
func GetSuccessChunks(ctx *context.Context) {
	fileMD5 := ctx.Query("md5")
	typeCloudBrain := ctx.QueryInt("type")
	fileName := ctx.Query("file_name")
	var chunks string

	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	fileChunk, err := models.GetFileChunkByMD5AndUser(fileMD5, ctx.User.ID, typeCloudBrain)
	if err != nil {
		if models.IsErrFileChunkNotExist(err) {
			// No previous upload attempt: tell the client to start fresh.
			ctx.JSON(200, map[string]string{
				"uuid":     "",
				"uploaded": "0",
				"uploadID": "",
				"chunks":   "",
			})
		} else {
			ctx.ServerError("GetFileChunkByMD5", err)
		}
		return
	}

	// Reconcile the DB record with what is actually in object storage.
	isExist := false
	if typeCloudBrain == models.TypeCloudBrainOne {
		isExist, err = storage.Attachments.HasObject(models.AttachmentRelativePath(fileChunk.UUID))
		if err != nil {
			ctx.ServerError("HasObject failed", err)
			return
		}
	} else {
		isExist, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(fileChunk.UUID) + "/" + fileName)
		if err != nil {
			ctx.ServerError("ObsHasObject failed", err)
			return
		}
	}

	if isExist {
		if fileChunk.IsUploaded == models.FileNotUploaded {
			log.Info("the file has been uploaded but not recorded")
			fileChunk.IsUploaded = models.FileUploaded
			if err = models.UpdateFileChunk(fileChunk); err != nil {
				log.Error("UpdateFileChunk failed:", err.Error())
			}
		}
	} else {
		if fileChunk.IsUploaded == models.FileUploaded {
			log.Info("the file has been recorded but not uploaded")
			fileChunk.IsUploaded = models.FileNotUploaded
			if err = models.UpdateFileChunk(fileChunk); err != nil {
				log.Error("UpdateFileChunk failed:", err.Error())
			}
		}
		// Ask the store which parts of the session are already present so
		// the client can resume from the right offset.
		if typeCloudBrain == models.TypeCloudBrainOne {
			chunks, err = storage.GetPartInfos(fileChunk.UUID, fileChunk.UploadID)
			if err != nil {
				log.Error("GetPartInfos failed:%v", err.Error())
			}
		} else {
			chunks, err = storage.GetObsPartInfos(fileChunk.UUID, fileChunk.UploadID, fileName)
			if err != nil {
				log.Error("GetObsPartInfos failed:%v", err.Error())
			}
		}
		if err != nil {
			// Session is unrecoverable: drop the record so the client restarts.
			models.DeleteFileChunk(fileChunk)
			ctx.JSON(200, map[string]string{
				"uuid":     "",
				"uploaded": "0",
				"uploadID": "",
				"chunks":   "",
			})
			return
		}
	}

	var attachID int64
	attach, err := models.GetAttachmentByUUID(fileChunk.UUID)
	if err != nil {
		if models.IsErrAttachmentNotExist(err) {
			attachID = 0
		} else {
			ctx.ServerError("GetAttachmentByUUID", err)
			return
		}
	} else {
		attachID = attach.ID
	}

	if attach == nil {
		// Upload session exists but no attachment row was created yet.
		ctx.JSON(200, map[string]string{
			"uuid":        fileChunk.UUID,
			"uploaded":    strconv.Itoa(fileChunk.IsUploaded),
			"uploadID":    fileChunk.UploadID,
			"chunks":      string(chunks),
			"attachID":    "0",
			"datasetID":   "0",
			"fileName":    "",
			"datasetName": "",
		})
		return
	}

	dataset, err := models.GetDatasetByID(attach.DatasetID)
	if err != nil {
		ctx.ServerError("GetDatasetByID", err)
		return
	}

	ctx.JSON(200, map[string]string{
		"uuid":        fileChunk.UUID,
		"uploaded":    strconv.Itoa(fileChunk.IsUploaded),
		"uploadID":    fileChunk.UploadID,
		"chunks":      string(chunks),
		"attachID":    strconv.Itoa(int(attachID)),
		"datasetID":   strconv.Itoa(int(attach.DatasetID)),
		"fileName":    attach.Name,
		"datasetName": dataset.Title,
	})
}
// NewMultipart starts a chunked upload: it validates the file type and the
// chunk-count/size limits, opens a multipart session in MinIO or OBS
// (depending on "type"), and records the session as a FileChunk row.
func NewMultipart(ctx *context.Context) {
	if !setting.Attachment.Enabled {
		ctx.Error(404, "attachment is not enabled")
		return
	}

	err := upload.VerifyFileType(ctx.Query("fileType"), strings.Split(setting.Attachment.AllowedTypes, ","))
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}

	typeCloudBrain := ctx.QueryInt("type")
	err = checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	fileName := ctx.Query("file_name")

	if setting.Attachment.StoreType == storage.MinioStorageType {
		totalChunkCounts := ctx.QueryInt("totalChunkCounts")
		if totalChunkCounts > minio_ext.MaxPartsCount {
			ctx.Error(400, fmt.Sprintf("chunk counts(%d) is too much", totalChunkCounts))
			return
		}

		fileSize := ctx.QueryInt64("size")
		if fileSize > minio_ext.MaxMultipartPutObjectSize {
			ctx.Error(400, fmt.Sprintf("file size(%d) is too big", fileSize))
			return
		}

		uuid := gouuid.NewV4().String()
		var uploadID string
		if typeCloudBrain == models.TypeCloudBrainOne {
			uploadID, err = storage.NewMultiPartUpload(uuid)
			if err != nil {
				ctx.ServerError("NewMultipart", err)
				return
			}
		} else {
			uploadID, err = storage.NewObsMultiPartUpload(uuid, fileName)
			if err != nil {
				ctx.ServerError("NewObsMultiPartUpload", err)
				return
			}
		}

		_, err = models.InsertFileChunk(&models.FileChunk{
			UUID:        uuid,
			UserID:      ctx.User.ID,
			UploadID:    uploadID,
			Md5:         ctx.Query("md5"),
			Size:        fileSize,
			TotalChunks: totalChunkCounts,
			Type:        typeCloudBrain,
		})
		if err != nil {
			ctx.Error(500, fmt.Sprintf("InsertFileChunk: %v", err))
			return
		}

		ctx.JSON(200, map[string]string{
			"uuid":     uuid,
			"uploadID": uploadID,
		})
	} else {
		ctx.Error(404, "storage type is not enabled")
		return
	}
}
  622. func PutOBSProxyUpload(ctx *context.Context) {
  623. uuid := ctx.Query("uuid")
  624. uploadID := ctx.Query("uploadId")
  625. partNumber := ctx.QueryInt("partNumber")
  626. fileName := ctx.Query("file_name")
  627. RequestBody := ctx.Req.Body()
  628. if RequestBody == nil {
  629. ctx.Error(500, fmt.Sprintf("FormFile: %v", RequestBody))
  630. return
  631. }
  632. err := storage.ObsMultiPartUpload(uuid, uploadID, partNumber, fileName, RequestBody.ReadCloser())
  633. if err != nil {
  634. log.Info("upload error.")
  635. }
  636. }
  637. func GetOBSProxyDownload(ctx *context.Context) {
  638. uuid := ctx.Query("uuid")
  639. fileName := ctx.Query("file_name")
  640. body, err := storage.ObsDownload(uuid, fileName)
  641. if err != nil {
  642. log.Info("upload error.")
  643. } else {
  644. defer body.Close()
  645. ctx.Resp.Header().Set("Content-Disposition", "attachment; filename="+fileName)
  646. ctx.Resp.Header().Set("Content-Type", "application/octet-stream")
  647. p := make([]byte, 1024)
  648. var readErr error
  649. var readCount int
  650. // 读取对象内容
  651. for {
  652. readCount, readErr = body.Read(p)
  653. if readCount > 0 {
  654. ctx.Resp.Write(p[:readCount])
  655. //fmt.Printf("%s", p[:readCount])
  656. }
  657. if readErr != nil {
  658. break
  659. }
  660. }
  661. }
  662. }
// GetMultipartUploadUrl returns the URL the client should PUT one chunk to:
// a presigned MinIO URL for CloudBrainOne, otherwise either the OBS proxy
// endpoint (when PROXYURL is set) or a presigned OBS URL.
func GetMultipartUploadUrl(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	uploadID := ctx.Query("uploadID")
	partNumber := ctx.QueryInt("chunkNumber")
	size := ctx.QueryInt64("size")
	fileName := ctx.Query("file_name")

	typeCloudBrain := ctx.QueryInt("type")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	url := ""
	if typeCloudBrain == models.TypeCloudBrainOne {
		// NOTE(review): this rejects sizes larger than MinPartSize, yet the
		// constant's name suggests a lower bound — confirm the intended limit.
		if size > minio_ext.MinPartSize {
			ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size))
			return
		}

		url, err = storage.GenMultiPartSignedUrl(uuid, uploadID, partNumber, size)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err))
			return
		}
	} else {
		if setting.PROXYURL != "" {
			url = setting.PROXYURL + "/obs_proxy_multipart?uuid=" + uuid + "&uploadId=" + uploadID + "&partNumber=" + fmt.Sprint(partNumber) + "&file_name=" + fileName
			log.Info("return url=" + url)
		} else {
			url, err = storage.ObsGenMultiPartSignedUrl(uuid, uploadID, partNumber, fileName)
			if err != nil {
				ctx.Error(500, fmt.Sprintf("ObsGenMultiPartSignedUrl failed: %v", err))
				return
			}
			log.Info("url=" + url)
		}
	}

	ctx.JSON(200, map[string]string{
		"url": url,
	})
}
// GetObsKey hands the client a fresh UUID, the derived object key, and the
// OBS endpoint plus credentials for a direct upload.
// NOTE(review): this returns the server's SecretAccessKey to the caller —
// confirm this endpoint is restricted to trusted clients.
func GetObsKey(ctx *context.Context) {
	uuid := gouuid.NewV4().String()
	// Key layout mirrors attachment paths: <base>/<u>/<u>/<uuid>/<uuid>.
	key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/")

	ctx.JSON(200, map[string]string{
		"uuid":              uuid,
		"key":               key,
		"access_key_id":     setting.AccessKeyID,
		"secret_access_key": setting.SecretAccessKey,
		"server":            setting.Endpoint,
		"bucket":            setting.Bucket,
	})
}
  715. func CompleteMultipart(ctx *context.Context) {
  716. uuid := ctx.Query("uuid")
  717. uploadID := ctx.Query("uploadID")
  718. typeCloudBrain := ctx.QueryInt("type")
  719. fileName := ctx.Query("file_name")
  720. log.Warn("uuid:" + uuid)
  721. log.Warn("typeCloudBrain:" + strconv.Itoa(typeCloudBrain))
  722. err := checkTypeCloudBrain(typeCloudBrain)
  723. if err != nil {
  724. ctx.ServerError("checkTypeCloudBrain failed", err)
  725. return
  726. }
  727. fileChunk, err := models.GetFileChunkByUUID(uuid)
  728. if err != nil {
  729. if models.IsErrFileChunkNotExist(err) {
  730. ctx.Error(404)
  731. } else {
  732. ctx.ServerError("GetFileChunkByUUID", err)
  733. }
  734. return
  735. }
  736. if typeCloudBrain == models.TypeCloudBrainOne {
  737. _, err = storage.CompleteMultiPartUpload(uuid, uploadID)
  738. if err != nil {
  739. ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err))
  740. return
  741. }
  742. } else {
  743. err = storage.CompleteObsMultiPartUpload(uuid, uploadID, fileName)
  744. if err != nil {
  745. ctx.Error(500, fmt.Sprintf("CompleteObsMultiPartUpload failed: %v", err))
  746. return
  747. }
  748. }
  749. fileChunk.IsUploaded = models.FileUploaded
  750. err = models.UpdateFileChunk(fileChunk)
  751. if err != nil {
  752. ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
  753. return
  754. }
  755. dataset, _ := models.GetDatasetByID(ctx.QueryInt64("dataset_id"))
  756. log.Warn("insert attachment to datasetId:" + strconv.FormatInt(dataset.ID, 10))
  757. attachment, err := models.InsertAttachment(&models.Attachment{
  758. UUID: uuid,
  759. UploaderID: ctx.User.ID,
  760. IsPrivate: dataset.IsPrivate(),
  761. Name: fileName,
  762. Size: ctx.QueryInt64("size"),
  763. DatasetID: ctx.QueryInt64("dataset_id"),
  764. Description: ctx.Query("description"),
  765. Type: typeCloudBrain,
  766. })
  767. if err != nil {
  768. ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
  769. return
  770. }
  771. repository, _ := models.GetRepositoryByID(dataset.RepoID)
  772. notification.NotifyOtherTask(ctx.User, repository, fmt.Sprint(attachment.Type), attachment.Name, models.ActionUploadAttachment)
  773. if attachment.DatasetID != 0 {
  774. if isCanDecompress(attachment.Name) {
  775. if typeCloudBrain == models.TypeCloudBrainOne {
  776. err = worker.SendDecompressTask(contexExt.Background(), uuid, attachment.Name)
  777. if err != nil {
  778. log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
  779. } else {
  780. attachment.DecompressState = models.DecompressStateIng
  781. err = models.UpdateAttachment(attachment)
  782. if err != nil {
  783. log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error())
  784. }
  785. }
  786. }
  787. if typeCloudBrain == models.TypeCloudBrainTwo {
  788. attachjson, _ := json.Marshal(attachment)
  789. labelmsg.SendDecompressAttachToLabelOBS(string(attachjson))
  790. }
  791. } else {
  792. var labelMap map[string]string
  793. labelMap = make(map[string]string)
  794. labelMap["UUID"] = uuid
  795. labelMap["Type"] = fmt.Sprint(attachment.Type)
  796. labelMap["UploaderID"] = fmt.Sprint(attachment.UploaderID)
  797. labelMap["RepoID"] = fmt.Sprint(dataset.RepoID)
  798. labelMap["AttachName"] = attachment.Name
  799. attachjson, _ := json.Marshal(labelMap)
  800. labelmsg.SendAddAttachToLabelSys(string(attachjson))
  801. }
  802. }
  803. ctx.JSON(200, map[string]string{
  804. "result_code": "0",
  805. })
  806. }
  807. func UpdateMultipart(ctx *context.Context) {
  808. uuid := ctx.Query("uuid")
  809. partNumber := ctx.QueryInt("chunkNumber")
  810. etag := ctx.Query("etag")
  811. fileChunk, err := models.GetFileChunkByUUID(uuid)
  812. if err != nil {
  813. if models.IsErrFileChunkNotExist(err) {
  814. ctx.Error(404)
  815. } else {
  816. ctx.ServerError("GetFileChunkByUUID", err)
  817. }
  818. return
  819. }
  820. fileChunk.CompletedParts = append(fileChunk.CompletedParts, strconv.Itoa(partNumber)+"-"+strings.Replace(etag, "\"", "", -1))
  821. err = models.UpdateFileChunk(fileChunk)
  822. if err != nil {
  823. ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
  824. return
  825. }
  826. ctx.JSON(200, map[string]string{
  827. "result_code": "0",
  828. })
  829. }
  830. func HandleUnDecompressAttachment() {
  831. attachs, err := models.GetUnDecompressAttachments()
  832. if err != nil {
  833. log.Error("GetUnDecompressAttachments failed:", err.Error())
  834. return
  835. }
  836. for _, attach := range attachs {
  837. if attach.Type == models.TypeCloudBrainOne {
  838. err = worker.SendDecompressTask(contexExt.Background(), attach.UUID, attach.Name)
  839. if err != nil {
  840. log.Error("SendDecompressTask(%s) failed:%s", attach.UUID, err.Error())
  841. } else {
  842. attach.DecompressState = models.DecompressStateIng
  843. err = models.UpdateAttachment(attach)
  844. if err != nil {
  845. log.Error("UpdateAttachment state(%s) failed:%s", attach.UUID, err.Error())
  846. }
  847. }
  848. } else if attach.Type == models.TypeCloudBrainTwo {
  849. attachjson, _ := json.Marshal(attach)
  850. labelmsg.SendDecompressAttachToLabelOBS(string(attachjson))
  851. }
  852. }
  853. return
  854. }
  855. func QueryAllPublicDataset(ctx *context.Context) {
  856. attachs, err := models.GetAllPublicAttachments()
  857. if err != nil {
  858. ctx.JSON(200, map[string]string{
  859. "result_code": "-1",
  860. "error_msg": err.Error(),
  861. "data": "",
  862. })
  863. return
  864. }
  865. queryDatasets(ctx, attachs)
  866. }
  867. func QueryPrivateDataset(ctx *context.Context) {
  868. username := ctx.Params(":username")
  869. attachs, err := models.GetPrivateAttachments(username)
  870. if err != nil {
  871. ctx.JSON(200, map[string]string{
  872. "result_code": "-1",
  873. "error_msg": err.Error(),
  874. "data": "",
  875. })
  876. return
  877. }
  878. for _, attach := range attachs {
  879. attach.Name = username
  880. }
  881. queryDatasets(ctx, attachs)
  882. }
  883. func queryDatasets(ctx *context.Context, attachs []*models.AttachmentUsername) {
  884. var datasets []CloudBrainDataset
  885. if len(attachs) == 0 {
  886. log.Info("dataset is null")
  887. ctx.JSON(200, map[string]string{
  888. "result_code": "0",
  889. "error_msg": "",
  890. "data": "",
  891. })
  892. return
  893. }
  894. for _, attch := range attachs {
  895. has, err := storage.Attachments.HasObject(models.AttachmentRelativePath(attch.UUID))
  896. if err != nil || !has {
  897. continue
  898. }
  899. datasets = append(datasets, CloudBrainDataset{strconv.FormatInt(attch.ID, 10),
  900. attch.Attachment.Name,
  901. setting.Attachment.Minio.RealPath +
  902. setting.Attachment.Minio.Bucket + "/" +
  903. setting.Attachment.Minio.BasePath +
  904. models.AttachmentRelativePath(attch.UUID) +
  905. attch.UUID,
  906. attch.Name,
  907. attch.CreatedUnix.Format("2006-01-02 03:04:05 PM")})
  908. }
  909. data, err := json.Marshal(datasets)
  910. if err != nil {
  911. log.Error("json.Marshal failed:", err.Error())
  912. ctx.JSON(200, map[string]string{
  913. "result_code": "-1",
  914. "error_msg": err.Error(),
  915. "data": "",
  916. })
  917. return
  918. }
  919. ctx.JSON(200, map[string]string{
  920. "result_code": "0",
  921. "error_msg": "",
  922. "data": string(data),
  923. })
  924. return
  925. }
  926. func checkTypeCloudBrain(typeCloudBrain int) error {
  927. if typeCloudBrain != models.TypeCloudBrainOne && typeCloudBrain != models.TypeCloudBrainTwo {
  928. log.Error("type error:", typeCloudBrain)
  929. return errors.New("type error")
  930. }
  931. return nil
  932. }