
dataset.go

package models

import (
	"errors"
	"fmt"
	"sort"

	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/timeutil"

	"xorm.io/builder"
)
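
// Dataset status values stored in Dataset.Status.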
const (
	DatasetStatusPrivate int32 = iota
	DatasetStatusPublic
	DatasetStatusDeleted
)
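
// Dataset represents a dataset bound to a repository.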
type Dataset struct {
	ID            int64  `xorm:"pk autoincr"`
	Title         string `xorm:"INDEX NOT NULL"`
	Status        int32  `xorm:"INDEX"` // private: 0, public: 1, deleted: 2
	Category      string
	Description   string `xorm:"TEXT"`
	DownloadTimes int64
	NumStars      int `xorm:"INDEX NOT NULL DEFAULT 0"`
	License       string
	Task          string
	ReleaseID     int64              `xorm:"INDEX"`
	UserID        int64              `xorm:"INDEX"`
	RepoID        int64              `xorm:"INDEX"`
	Repo          *Repository        `xorm:"-"`
	CreatedUnix   timeutil.TimeStamp `xorm:"INDEX created"`
	UpdatedUnix   timeutil.TimeStamp `xorm:"INDEX updated"`
	User          *User              `xorm:"-"`
	Attachments   []*Attachment      `xorm:"-"`
}
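
// DatasetWithStar wraps a Dataset with whether the current user has starred it.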
type DatasetWithStar struct {
	Dataset
	IsStaring bool
}
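
// IsPrivate reports whether the dataset is visible only to its owner.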
func (d *Dataset) IsPrivate() bool {
	switch d.Status {
	case DatasetStatusPrivate:
		return true
	case DatasetStatusPublic:
		return false
	case DatasetStatusDeleted:
		return false
	default:
		return false
	}
}
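
// DatasetList is a list of datasets whose related users and repositories can be loaded in batch.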
type DatasetList []*Dataset
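
// loadAttributes batch-loads the owner (User) and Repository of every dataset in the list.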
func (datasets DatasetList) loadAttributes(e Engine) error {
	if len(datasets) == 0 {
		return nil
	}
	set := make(map[int64]struct{})
	userIdSet := make(map[int64]struct{})
	datasetIDs := make([]int64, len(datasets))
	for i := range datasets {
		userIdSet[datasets[i].UserID] = struct{}{}
		set[datasets[i].RepoID] = struct{}{}
		datasetIDs[i] = datasets[i].ID
	}
	// Load owners.
	users := make(map[int64]*User, len(userIdSet))
	repos := make(map[int64]*Repository, len(set))
	if err := e.
		Where("id > 0").
		In("id", keysInt64(userIdSet)).
		Find(&users); err != nil {
		return fmt.Errorf("find users: %v", err)
	}
	if err := e.
		Where("id > 0").
		In("id", keysInt64(set)).
		Find(&repos); err != nil {
		return fmt.Errorf("find repos: %v", err)
	}
	for i := range datasets {
		datasets[i].User = users[datasets[i].UserID]
		datasets[i].Repo = repos[datasets[i].RepoID]
	}
	return nil
}
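
// SearchDatasetOptions holds the filters and paging options used when searching datasets.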
type SearchDatasetOptions struct {
	Keyword       string
	OwnerID       int64
	RepoID        int64
	IncludePublic bool
	Category      string
	Task          string
	License       string
	ListOptions
	SearchOrderBy
	IsOwner bool
}
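
// CreateDataset inserts a new dataset; a repository may have at most one dataset.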
func CreateDataset(dataset *Dataset) (err error) {
	sess := x.NewSession()
	defer sess.Close()
	if err := sess.Begin(); err != nil {
		return err
	}
	datasetByRepoId := &Dataset{RepoID: dataset.RepoID}
	has, err := sess.Get(datasetByRepoId)
	if err != nil {
		return err
	}
	if has {
		return errors.New("the dataset already exists")
	}
	if _, err = sess.Insert(dataset); err != nil {
		return err
	}
	return sess.Commit()
}
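
// SearchDataset returns the datasets matching opts together with the total count.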
func SearchDataset(opts *SearchDatasetOptions) (DatasetList, int64, error) {
	cond := SearchDatasetCondition(opts)
	return SearchDatasetByCondition(opts, cond)
}
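
// SearchDatasetCondition builds the query condition described by opts.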
func SearchDatasetCondition(opts *SearchDatasetOptions) builder.Cond {
	var cond = builder.NewCond()
	cond = cond.And(builder.Neq{"dataset.status": DatasetStatusDeleted})
	cond = generateFilterCond(opts, cond)
	if opts.RepoID > 0 {
		cond = cond.And(builder.Eq{"dataset.repo_id": opts.RepoID})
	}
	if opts.IncludePublic {
		cond = cond.And(builder.Eq{"dataset.status": DatasetStatusPublic})
		cond = cond.And(builder.Eq{"attachment.is_private": false})
		if opts.OwnerID > 0 {
			subCon := builder.NewCond()
			subCon = subCon.And(builder.Eq{"repository.owner_id": opts.OwnerID})
			subCon = generateFilterCond(opts, subCon)
			cond = cond.Or(subCon)
		}
	} else if opts.OwnerID > 0 {
		cond = cond.And(builder.Eq{"repository.owner_id": opts.OwnerID})
		if !opts.IsOwner {
			cond = cond.And(builder.Eq{"dataset.status": DatasetStatusPublic})
			cond = cond.And(builder.Eq{"attachment.is_private": false})
		}
	}
	return cond
}
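
// generateFilterCond appends the keyword, category, task and license filters from opts to cond.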
func generateFilterCond(opts *SearchDatasetOptions, cond builder.Cond) builder.Cond {
	if len(opts.Keyword) > 0 {
		cond = cond.And(builder.Or(builder.Like{"dataset.title", opts.Keyword}, builder.Like{"dataset.description", opts.Keyword}))
	}
	if len(opts.Category) > 0 {
		cond = cond.And(builder.Eq{"dataset.category": opts.Category})
	}
	if len(opts.Task) > 0 {
		cond = cond.And(builder.Eq{"dataset.task": opts.Task})
	}
	if len(opts.License) > 0 {
		cond = cond.And(builder.Eq{"dataset.license": opts.License})
	}
	return cond
}
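
// SearchDatasetByCondition runs the paged dataset query for cond and loads the related users and repositories.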
func SearchDatasetByCondition(opts *SearchDatasetOptions, cond builder.Cond) (DatasetList, int64, error) {
	if opts.Page <= 0 {
		opts.Page = 1
	}
	var err error
	sess := x.NewSession()
	defer sess.Close()
	datasets := make(DatasetList, 0, opts.PageSize)
	selectColumnsSql := "distinct dataset.id,dataset.title, dataset.status, dataset.category, dataset.description, dataset.download_times, dataset.license, dataset.task, dataset.release_id, dataset.user_id, dataset.repo_id, dataset.created_unix,dataset.updated_unix,dataset.num_stars"
	count, err := sess.Distinct("dataset.id").Join("INNER", "repository", "repository.id = dataset.repo_id").
		Join("INNER", "attachment", "attachment.dataset_id=dataset.id").
		Where(cond).Count(new(Dataset))
	if err != nil {
		return nil, 0, fmt.Errorf("Count: %v", err)
	}
	sess.Select(selectColumnsSql).Join("INNER", "repository", "repository.id = dataset.repo_id").
		Join("INNER", "attachment", "attachment.dataset_id=dataset.id").
		Where(cond).OrderBy(opts.SearchOrderBy.String())
	if opts.PageSize > 0 {
		sess.Limit(opts.PageSize, (opts.Page-1)*opts.PageSize)
	}
	if err = sess.Find(&datasets); err != nil {
		return nil, 0, fmt.Errorf("Dataset: %v", err)
	}
	if err = datasets.loadAttributes(sess); err != nil {
		return nil, 0, fmt.Errorf("LoadAttributes: %v", err)
	}
	return datasets, count, nil
}
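
// datasetMetaSearch sorts dataset IDs and their datasets together so attachments can be merge-joined by dataset ID.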
type datasetMetaSearch struct {
	ID  []int64
	Rel []*Dataset
}

func (s datasetMetaSearch) Len() int {
	return len(s.ID)
}

func (s datasetMetaSearch) Swap(i, j int) {
	s.ID[i], s.ID[j] = s.ID[j], s.ID[i]
	s.Rel[i], s.Rel[j] = s.Rel[j], s.Rel[i]
}

func (s datasetMetaSearch) Less(i, j int) bool {
	return s.ID[i] < s.ID[j]
}
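
// GetDatasetAttachments loads the attachments of the given datasets, optionally filtered by cloud-brain type.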
func GetDatasetAttachments(typeCloudBrain int, isSigned bool, user *User, rels ...*Dataset) (err error) {
	return getDatasetAttachments(x, typeCloudBrain, isSigned, user, rels...)
}
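
// getDatasetAttachments fetches all attachments of the given datasets and assigns them to their datasets via a merge join on dataset ID.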
func getDatasetAttachments(e Engine, typeCloudBrain int, isSigned bool, user *User, rels ...*Dataset) (err error) {
	if len(rels) == 0 {
		return
	}
	// To keep this as efficient as possible, sort all datasets by ID,
	// select attachments by dataset ID,
	// then merge-join them.
	// Sort
	var sortedRels = datasetMetaSearch{ID: make([]int64, len(rels)), Rel: make([]*Dataset, len(rels))}
	var attachments []*Attachment
	for index, element := range rels {
		element.Attachments = []*Attachment{}
		sortedRels.ID[index] = element.ID
		sortedRels.Rel[index] = element
	}
	sort.Sort(sortedRels)
	// Select attachments
	if typeCloudBrain == -1 {
		err = e.
			Asc("dataset_id").
			In("dataset_id", sortedRels.ID).
			Find(&attachments, Attachment{})
		if err != nil {
			return err
		}
	} else {
		err = e.
			Asc("dataset_id").
			In("dataset_id", sortedRels.ID).
			And("type = ?", typeCloudBrain).
			Find(&attachments, Attachment{})
		if err != nil {
			return err
		}
	}
	// Merge join: attachments are ordered by dataset_id, so advance through the sorted datasets in step.
	var currentIndex = 0
	for _, attachment := range attachments {
		for sortedRels.ID[currentIndex] < attachment.DatasetID {
			currentIndex++
		}
		fileChunks := make([]*FileChunk, 0, 10)
		err = e.
			Where("uuid = ?", attachment.UUID).
			Find(&fileChunks)
		if err != nil {
			return err
		}
		if len(fileChunks) > 0 {
			attachment.Md5 = fileChunks[0].Md5
		} else {
			log.Error("has attachment record, but has no file_chunk record")
			attachment.Md5 = "no_record"
		}
		attachment.CanDel = CanDelAttachment(isSigned, user, attachment)
		sortedRels.Rel[currentIndex].Attachments = append(sortedRels.Rel[currentIndex].Attachments, attachment)
	}
	return
}

// AddDatasetAttachments binds the given attachments to a dataset.
func AddDatasetAttachments(DatasetID int64, attachmentUUIDs []string) (err error) {
	// Check attachments
	attachments, err := GetAttachmentsByUUIDs(attachmentUUIDs)
	if err != nil {
		return fmt.Errorf("GetAttachmentsByUUIDs [uuids: %v]: %v", attachmentUUIDs, err)
	}
	for i := range attachments {
		attachments[i].DatasetID = DatasetID
		// The assigned value is never 0, so xorm's default zero-value skipping is fine and AllCols() is not needed.
		if _, err = x.ID(attachments[i].ID).Update(attachments[i]); err != nil {
			return fmt.Errorf("update attachment [%d]: %v", attachments[i].ID, err)
		}
	}
	return
}
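
// UpdateDataset updates all columns of the given dataset.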
func UpdateDataset(ctx DBContext, rel *Dataset) error {
	_, err := ctx.e.ID(rel.ID).AllCols().Update(rel)
	return err
}

// GetDatasetByID returns the dataset with the given ID.
func GetDatasetByID(id int64) (*Dataset, error) {
	rel := new(Dataset)
	has, err := x.
		ID(id).
		Get(rel)
	if err != nil {
		return nil, err
	} else if !has {
		return nil, ErrDatasetNotExist{id}
	}
	return rel, nil
}
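
// GetDatasetByRepo returns the dataset that belongs to the given repository.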
func GetDatasetByRepo(repo *Repository) (*Dataset, error) {
	dataset := &Dataset{RepoID: repo.ID}
	has, err := x.Get(dataset)
	if err != nil {
		return nil, err
	}
	if !has {
		return nil, ErrNotExist{repo.ID}
	}
	return dataset, nil
}
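
// GetDatasetStarByUser returns all dataset stars created by the given user.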
func GetDatasetStarByUser(user *User) ([]*DatasetStar, error) {
	datasetStars := make([]*DatasetStar, 0)
	err := x.Cols("id", "uid", "dataset_id", "created_unix").Where("uid=?", user.ID).Find(&datasetStars)
	return datasetStars, err
}
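
// DeleteDataset removes the dataset with the given ID if it is owned by uid.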
func DeleteDataset(datasetID int64, uid int64) error {
	var err error
	sess := x.NewSession()
	defer sess.Close()
	if err = sess.Begin(); err != nil {
		return err
	}
	dataset := &Dataset{ID: datasetID, UserID: uid}
	has, err := sess.Get(dataset)
	if err != nil {
		return err
	} else if !has {
		return errors.New("not found")
	}
	if cnt, err := sess.ID(datasetID).Delete(new(Dataset)); err != nil {
		return err
	} else if cnt != 1 {
		return errors.New("not found")
	}
	if err = sess.Commit(); err != nil {
		return fmt.Errorf("Commit: %v", err)
	}
	return nil
}
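
// GetOwnerDatasetByID returns the dataset with the given ID if it is public or owned by the given user.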
func GetOwnerDatasetByID(id int64, user *User) (*Dataset, error) {
	dataset, err := GetDatasetByID(id)
	if err != nil {
		return nil, err
	}
	if !dataset.IsPrivate() {
		return dataset, nil
	}
	if user != nil && user.ID == dataset.UserID {
		return dataset, nil
	}
	return nil, errors.New("dataset not found")
}
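
// IncreaseDownloadCount increments the download counter of the given dataset.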
func IncreaseDownloadCount(datasetID int64) error {
	// Update download count.
	if _, err := x.Exec("UPDATE `dataset` SET download_times=download_times+1 WHERE id=?", datasetID); err != nil {
		return fmt.Errorf("increase dataset download count: %v", err)
	}
	return nil
}