Browse Source

Merge pull request 'V20220926版本合并' (#2979) from V20220926 into develop

Reviewed-on: https://git.openi.org.cn/OpenI/aiforge/pulls/2979
tags/v1.22.9.2
ychao_1983 2 years ago
parent
commit
abb38f5ab3
100 changed files with 6108 additions and 533 deletions
  1. +101
    -0
      models/action.go
  2. +165
    -1
      models/action_list.go
  3. +24
    -0
      models/ai_model_manage.go
  4. +8
    -0
      models/attachment.go
  5. +122
    -21
      models/cloudbrain.go
  6. +27
    -0
      models/cloudbrain_spec.go
  7. +1
    -0
      models/cloudbrain_static.go
  8. +24
    -0
      models/error.go
  9. +22
    -0
      models/helper.go
  10. +184
    -0
      models/limit_config.go
  11. +9
    -0
      models/models.go
  12. +142
    -0
      models/point_account.go
  13. +21
    -0
      models/point_account_log.go
  14. +6
    -0
      models/repo.go
  15. +19
    -2
      models/repo_watch.go
  16. +79
    -0
      models/reward_admin_log.go
  17. +485
    -0
      models/reward_operate_record.go
  18. +115
    -0
      models/reward_periodic_task.go
  19. +44
    -0
      models/task_accomplish_log.go
  20. +374
    -0
      models/task_config.go
  21. +4
    -0
      models/user.go
  22. +72
    -5
      models/user_business_analysis.go
  23. +14
    -7
      models/user_business_struct.go
  24. +102
    -0
      models/user_invitation.go
  25. +4
    -0
      models/wechat_bind.go
  26. +5
    -0
      modules/auth/cloudbrain.go
  27. +5
    -0
      modules/auth/grampus.go
  28. +6
    -0
      modules/auth/modelarts.go
  29. +1
    -1
      modules/auth/user_form.go
  30. +11
    -8
      modules/auth/wechat/access_token.go
  31. +3
    -3
      modules/auth/wechat/bind.go
  32. +2
    -0
      modules/auth/wechat/client.go
  33. +14
    -0
      modules/cloudbrain/cloudbrain.go
  34. +8
    -4
      modules/cloudbrain/resty.go
  35. +21
    -0
      modules/context/point.go
  36. +26
    -0
      modules/cron/tasks_basic.go
  37. +25
    -1
      modules/dataset/dataset.go
  38. +22
    -0
      modules/eventsource/manager_run.go
  39. +56
    -5
      modules/grampus/grampus.go
  40. +6
    -5
      modules/grampus/resty.go
  41. +18
    -2
      modules/modelarts/modelarts.go
  42. +76
    -0
      modules/notification/action/action.go
  43. +6
    -0
      modules/notification/base/notifier.go
  44. +18
    -0
      modules/notification/base/null.go
  45. +38
    -0
      modules/notification/notification.go
  46. +27
    -0
      modules/notification/reward/point.go
  47. +83
    -1
      modules/redis/redis_client/client.go
  48. +17
    -0
      modules/redis/redis_key/account_redis_key.go
  49. +7
    -0
      modules/redis/redis_key/cloudbrain_redis_key.go
  50. +2
    -0
      modules/redis/redis_key/key_base.go
  51. +26
    -0
      modules/redis/redis_key/limit_redis_key.go
  52. +21
    -0
      modules/redis/redis_key/reward_redis_key.go
  53. +10
    -0
      modules/redis/redis_key/serial_redis_key.go
  54. +14
    -0
      modules/redis/redis_key/task_redis_key.go
  55. +16
    -9
      modules/redis/redis_lock/lock.go
  56. +45
    -24
      modules/setting/setting.go
  57. +1
    -1
      modules/templates/helper.go
  58. +10
    -0
      modules/util/uuid_util.go
  59. +32
    -5
      options/locale/locale_en-US.ini
  60. +32
    -3
      options/locale/locale_zh-CN.ini
  61. +1
    -1
      package.json
  62. +13
    -2
      public/home/home.js
  63. +3
    -0
      routers/admin/dataset.go
  64. +14
    -0
      routers/api/v1/api.go
  65. +5
    -1
      routers/api/v1/repo/cloudbrain.go
  66. +2
    -0
      routers/api/v1/repo/cloudbrain_dashboard.go
  67. +1
    -0
      routers/authentication/wechat.go
  68. +1
    -1
      routers/authentication/wechat_event.go
  69. +15
    -0
      routers/home.go
  70. +5
    -0
      routers/image/image.go
  71. +49
    -24
      routers/repo/ai_model_manage.go
  72. +153
    -11
      routers/repo/cloudbrain.go
  73. +311
    -84
      routers/repo/grampus.go
  74. +158
    -254
      routers/repo/modelarts.go
  75. +440
    -0
      routers/repo/user_invitation.go
  76. +24
    -0
      routers/reward/point/account.go
  77. +45
    -0
      routers/reward/point/limit.go
  78. +170
    -0
      routers/reward/point/point.go
  79. +44
    -10
      routers/routes/routes.go
  80. +68
    -0
      routers/task/config.go
  81. +15
    -0
      routers/task/task.go
  82. +107
    -0
      routers/user/Invitation.go
  83. +28
    -24
      routers/user/auth.go
  84. +2
    -0
      routers/user/setting/profile.go
  85. +1
    -1
      services/phone/phone.go
  86. +22
    -0
      services/repository/repository.go
  87. +50
    -0
      services/reward/admin_operate.go
  88. +145
    -0
      services/reward/cloudbrain_deduct.go
  89. +100
    -0
      services/reward/limiter/config.go
  90. +258
    -0
      services/reward/limiter/limiter.go
  91. +54
    -0
      services/reward/notify.go
  92. +278
    -0
      services/reward/operator.go
  93. +131
    -0
      services/reward/period_task.go
  94. +150
    -0
      services/reward/point/account/point_account.go
  95. +65
    -0
      services/reward/point/point_operate.go
  96. +47
    -0
      services/reward/record.go
  97. +28
    -0
      services/reward/serial.go
  98. +14
    -12
      services/socketwrap/clientManager.go
  99. +50
    -0
      services/task/period/handler.go
  100. +163
    -0
      services/task/task.go

+ 101
- 0
models/action.go View File

@@ -60,6 +60,12 @@ const (
ActionCreateGPUTrainTask //31
ActionCreateGrampusNPUTrainTask //32
ActionCreateGrampusGPUTrainTask //33
ActionBindWechat //34
ActionDatasetRecommended //35
ActionCreateImage //36
ActionImageRecommend //37
ActionChangeUserAvatar //38

)

// Action represents user operation type and other information to
@@ -81,6 +87,19 @@ type Action struct {
IsTransformed bool `xorm:"INDEX NOT NULL DEFAULT false"`
Content string `xorm:"TEXT"`
CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
Cloudbrain *Cloudbrain `xorm:"-"`
}

type ActionShow struct {
OpType ActionType
TaskType TaskType
RepoLink string
ShortRepoFullDisplayName string
Content string
RefName string
IssueInfos []string
CommentLink string
Cloudbrain *CloudbrainShow4Action
}

// GetOpType gets the ActionType of this action.
@@ -218,6 +237,40 @@ func (a *Action) GetRepoLink() string {
return "/" + a.GetRepoPath()
}

// ToShow converts an Action into the trimmed-down ActionShow view used by
// the activity feed, copying only display-relevant fields.
func (a *Action) ToShow() *ActionShow {
	actionShow := &ActionShow{}
	actionShow.OpType = a.OpType
	actionShow.TaskType = GetTaskTypeFromAction(a.OpType)
	actionShow.Content = a.Content
	actionShow.RefName = a.RefName

	// Issue-related actions carry "|"-separated info in Content; only parse
	// it when the separator is actually present.
	if strings.Contains(a.Content, "|") && a.IsIssueAction() {
		actionShow.IssueInfos = a.GetIssueInfos()
	}

	// Repo and Comment are lazily loaded elsewhere; guard against nil so a
	// partially loaded action still renders.
	if a.Repo != nil {
		actionShow.RepoLink = a.GetRepoLink()
		actionShow.ShortRepoFullDisplayName = a.ShortRepoFullDisplayName()
	}
	if a.Comment != nil {
		actionShow.CommentLink = a.GetCommentLink()
	}

	// Copy only the fields needed for display into the slim 4Action view.
	if a.Cloudbrain != nil {
		c := &CloudbrainShow4Action{
			ID:              a.Cloudbrain.ID,
			JobID:           a.Cloudbrain.JobID,
			Type:            a.Cloudbrain.Type,
			JobType:         a.Cloudbrain.JobType,
			DisplayJobName:  a.Cloudbrain.DisplayJobName,
			ComputeResource: a.Cloudbrain.ComputeResource,
		}
		actionShow.Cloudbrain = c
	}

	return actionShow
}

// GetRepositoryFromMatch returns a *Repository from a username and repo strings
func GetRepositoryFromMatch(ownerName string, repoName string) (*Repository, error) {
var err error
@@ -315,6 +368,39 @@ func (a *Action) GetIssueContent() string {
return issue.Content
}

// IsCloudbrainAction reports whether this action records the creation of a
// cloudbrain (AI compute) task of any supported kind.
func (a *Action) IsCloudbrainAction() bool {
	cloudbrainOps := []ActionType{
		ActionCreateDebugGPUTask,
		ActionCreateDebugNPUTask,
		ActionCreateTrainTask,
		ActionCreateInferenceTask,
		ActionCreateBenchMarkTask,
		ActionCreateGPUTrainTask,
		ActionCreateGrampusNPUTrainTask,
		ActionCreateGrampusGPUTrainTask,
	}
	for _, op := range cloudbrainOps {
		if a.OpType == op {
			return true
		}
	}
	return false
}

// IsIssueAction reports whether this action relates to an issue or pull
// request (create/close/reopen/comment/review/merge).
func (a *Action) IsIssueAction() bool {
	issueOps := []ActionType{
		ActionCreateIssue,
		ActionCloseIssue,
		ActionClosePullRequest,
		ActionReopenIssue,
		ActionReopenPullRequest,
		ActionCommentPull,
		ActionCommentIssue,
		ActionCreatePullRequest,
		ActionApprovePullRequest,
		ActionRejectPullRequest,
		ActionMergePullRequest,
	}
	for _, op := range issueOps {
		if a.OpType == op {
			return true
		}
	}
	return false
}

// GetFeedsOptions options for retrieving feeds
type GetFeedsOptions struct {
RequestedUser *User // the user we want activity for
@@ -404,3 +490,18 @@ func GetUnTransformedActions() ([]*Action, error) {
Find(&actions)
return actions, err
}

// GetActionByIds fetches the actions with the given ids and loads all of
// their attributes (user, repo, comment, cloudbrain).
// An empty id list returns (nil, nil).
func GetActionByIds(ids []int64) ([]*Action, error) {
	if len(ids) == 0 {
		return nil, nil
	}
	actions := make([]*Action, 0)
	if err := x.In("id", ids).Find(&actions); err != nil {
		return nil, err
	}
	if err := ActionList(actions).LoadAllAttributes(); err != nil {
		return nil, fmt.Errorf("ActionList loadAttributes: %v", err)
	}
	return actions, nil
}

+ 165
- 1
models/action_list.go View File

@@ -4,7 +4,11 @@

package models

import "fmt"
import (
"fmt"
"strconv"
"xorm.io/builder"
)

// ActionList defines a list of actions
type ActionList []*Action
@@ -26,6 +30,9 @@ func (actions ActionList) loadUsers(e Engine) ([]*User, error) {

userIDs := actions.getUserIDs()
userMaps := make(map[int64]*User, len(userIDs))
if len(userIDs) == 0 {
return make([]*User, 0), nil
}
err := e.
In("id", userIDs).
Find(&userMaps)
@@ -61,6 +68,9 @@ func (actions ActionList) loadRepositories(e Engine) ([]*Repository, error) {

repoIDs := actions.getRepoIDs()
repoMaps := make(map[int64]*Repository, len(repoIDs))
if len(repoIDs) == 0 {
return make([]*Repository, 0), nil
}
err := e.
In("id", repoIDs).
Find(&repoMaps)
@@ -79,6 +89,133 @@ func (actions ActionList) LoadRepositories() ([]*Repository, error) {
return actions.loadRepositories(x)
}

// getCommentIDs returns the distinct, non-zero CommentIDs referenced by the
// actions in the list.
func (actions ActionList) getCommentIDs() []int64 {
	commentIDs := make(map[int64]struct{}, len(actions))
	for _, action := range actions {
		if action.CommentID == 0 {
			continue
		}
		// Map assignment is idempotent; the original's presence check before
		// inserting was redundant.
		commentIDs[action.CommentID] = struct{}{}
	}
	return keysInt64(commentIDs)
}

// loadComments batch-loads the Comment attached to each action (via
// action.CommentID) with a single IN query and assigns it onto the actions.
// Returns the distinct comments that were loaded.
func (actions ActionList) loadComments(e Engine) ([]*Comment, error) {
	if len(actions) == 0 {
		return nil, nil
	}

	commentIDs := actions.getCommentIDs()

	commentMaps := make(map[int64]*Comment, len(commentIDs))
	// Avoid issuing an empty "IN ()" query when no action has a comment.
	if len(commentIDs) == 0 {
		return make([]*Comment, 0), nil
	}
	err := e.
		In("id", commentIDs).
		Find(&commentMaps)
	if err != nil {
		return nil, fmt.Errorf("find comment: %v", err)
	}

	// Attach each loaded comment to its action; IDs missing from the map
	// leave the action's Comment nil.
	for _, action := range actions {
		if action.CommentID > 0 {
			action.Comment = commentMaps[action.CommentID]
		}
	}
	return valuesComment(commentMaps), nil
}

// LoadComments loads all comments referenced by the actions, attaching each
// to its action, using the default engine.
func (actions ActionList) LoadComments() ([]*Comment, error) {
	return actions.loadComments(x)
}

// getCloudbrainIDs returns the distinct numeric cloudbrain IDs referenced by
// the cloudbrain-related actions. Content that holds a job-id string instead
// of a number parses to 0; that sentinel still enters the set (preserving
// the original behavior) but matches no database row.
func (actions ActionList) getCloudbrainIDs() []int64 {
	cloudbrainIDs := make(map[int64]struct{}, 0)
	for _, action := range actions {
		if !action.IsCloudbrainAction() {
			continue
		}
		// Best effort: the parse error is deliberately ignored.
		cloudbrainId, _ := strconv.ParseInt(action.Content, 10, 64)
		// Map assignment is idempotent; no presence check needed.
		cloudbrainIDs[cloudbrainId] = struct{}{}
	}
	return keysInt64(cloudbrainIDs)
}

// getCloudbrainJobIDs returns the distinct Content values of the
// cloudbrain-related actions, treating each as a candidate job ID.
func (actions ActionList) getCloudbrainJobIDs() []string {
	cloudbrainJobIDs := make(map[string]struct{}, 0)
	for _, action := range actions {
		if !action.IsCloudbrainAction() {
			continue
		}
		// Map assignment is idempotent; the original's presence check before
		// inserting was redundant.
		cloudbrainJobIDs[action.Content] = struct{}{}
	}
	return keysString(cloudbrainJobIDs)
}

// loadCloudbrains batch-loads the Cloudbrain task referenced by each
// cloudbrain-related action and assigns it onto the action. An action's
// Content may hold either the numeric cloudbrain ID or the string job ID,
// so candidates are fetched by both columns and matched back against the
// action's RefName to disambiguate.
func (actions ActionList) loadCloudbrains(e Engine) ([]*Cloudbrain, error) {
	if len(actions) == 0 {
		return nil, nil
	}
	cloudbrainIDs := actions.getCloudbrainIDs()
	cloudbrainJobIDs := actions.getCloudbrainJobIDs()

	cloudbrainMaps := make(map[int64]*Cloudbrain, len(cloudbrainIDs))
	if len(cloudbrainIDs) == 0 {
		return make([]*Cloudbrain, 0), nil
	}
	// When the different cloudbrain task types publish an action, the
	// meaning of the ID saved in the content field differs: some store the
	// numeric ID and some the jobId. So when querying the cloudbrain object
	// for an action, both columns are used as the query condition.
	// Unscoped() includes soft-deleted tasks.
	cond := builder.Or(builder.In("id", cloudbrainIDs)).Or(builder.In("job_id", cloudbrainJobIDs))
	err := e.
		Where(cond).Unscoped().
		Find(&cloudbrainMaps)
	if err != nil {
		return nil, fmt.Errorf("find cloudbrain: %v", err)
	}

	// Secondary index keyed by job ID for the string-content case.
	cloudBrainJobIdMap := make(map[string]*Cloudbrain, len(cloudbrainIDs))
	for _, v := range cloudbrainMaps {
		cloudBrainJobIdMap[v.JobID] = v
	}

	for _, action := range actions {
		if !action.IsCloudbrainAction() {
			continue
		}
		// Try the numeric-ID interpretation first; non-numeric content
		// parses to 0 and skips this branch.
		cloudbrainId, _ := strconv.ParseInt(action.Content, 10, 64)
		if cloudbrainId > 0 {
			if c, ok := cloudbrainMaps[cloudbrainId]; ok {
				if c.DisplayJobName == action.RefName || c.JobName == action.RefName {
					action.Cloudbrain = c
					continue
				}

			}
		}
		// Fall back to the job-ID interpretation of Content.
		if c, ok := cloudBrainJobIdMap[action.Content]; ok {
			if c.DisplayJobName == action.RefName || c.JobName == action.RefName {
				action.Cloudbrain = c
				continue
			}

		}
	}
	return valuesCloudbrain(cloudbrainMaps), nil
}

// LoadCloudbrains loads the cloudbrain tasks referenced by the actions and
// attaches them to their actions, using the default engine.
// Bug fix: the original body delegated to loadComments(x) and declared a
// []*Comment return, so it never loaded any cloudbrain data.
func (actions ActionList) LoadCloudbrains() ([]*Cloudbrain, error) {
	return actions.loadCloudbrains(x)
}

// loadAttributes loads all attributes
func (actions ActionList) loadAttributes(e Engine) (err error) {
if _, err = actions.loadUsers(e); err != nil {
@@ -96,3 +233,30 @@ func (actions ActionList) loadAttributes(e Engine) (err error) {
func (actions ActionList) LoadAttributes() error {
return actions.loadAttributes(x)
}

// LoadAllAttributes loads all attributes of the actions.
// Compared with LoadAttributes(), it additionally loads the Comment and
// Cloudbrain attributes.
func (actions ActionList) LoadAllAttributes() error {
	return actions.loadAllAttributes(x)
}

// loadAllAttributes populates users, repositories, comments and cloudbrain
// tasks for every action in the list, stopping at the first error.
func (actions ActionList) loadAllAttributes(e Engine) error {
	if _, err := actions.loadUsers(e); err != nil {
		return err
	}
	if _, err := actions.loadRepositories(e); err != nil {
		return err
	}
	if _, err := actions.loadComments(e); err != nil {
		return err
	}
	if _, err := actions.loadCloudbrains(e); err != nil {
		return err
	}
	return nil
}

+ 24
- 0
models/ai_model_manage.go View File

@@ -25,6 +25,7 @@ type AiModelManage struct {
DownloadCount int `xorm:"NOT NULL DEFAULT 0"`
Engine int64 `xorm:"NOT NULL DEFAULT 0"`
Status int `xorm:"NOT NULL DEFAULT 0"`
StatusDesc string `xorm:"varchar(500)"`
Accuracy string `xorm:"varchar(1000)"`
AttachmentId string `xorm:"NULL"`
RepoId int64 `xorm:"INDEX NULL"`
@@ -286,6 +287,23 @@ func ModifyModelDescription(id string, description string) error {
return nil
}

func ModifyModelStatus(id string, modelSize int64, status int, modelPath string, statusDesc string) error {
var sess *xorm.Session
sess = x.ID(id)
defer sess.Close()
re, err := sess.Cols("size", "status", "path", "status_desc").Update(&AiModelManage{
Size: modelSize,
Status: status,
Path: modelPath,
StatusDesc: statusDesc,
})
if err != nil {
return err
}
log.Info("success to update ModelStatus from db.re=" + fmt.Sprint((re)))
return nil
}

func ModifyModelNewProperty(id string, new int, versioncount int) error {
var sess *xorm.Session
sess = x.ID(id)
@@ -356,6 +374,12 @@ func QueryModel(opts *AiModelQueryOptions) ([]*AiModelManage, int64, error) {
)
}

if (opts.Status) >= 0 {
cond = cond.And(
builder.Eq{"ai_model_manage.status": opts.Status},
)
}

count, err := sess.Where(cond).Count(new(AiModelManage))
if err != nil {
return nil, 0, fmt.Errorf("Count: %v", err)


+ 8
- 0
models/attachment.go View File

@@ -701,3 +701,11 @@ func Attachments(opts *AttachmentsOptions) ([]*AttachmentInfo, int64, error) {

return attachments, count, nil
}

// GetAllDatasetContributorByDatasetId returns the distinct users who have
// uploaded at least one attachment to the given dataset.
// NOTE(review): the query uses the PostgreSQL-style "public.user" qualifier
// and distinct(...) over a row — confirm portability if other databases
// must be supported.
func GetAllDatasetContributorByDatasetId(datasetId int64) ([]*User, error) {
	r := make([]*User, 0)
	if err := x.Select("distinct(public.user.*)").Table("attachment").Join("LEFT", "user", "public.user.ID = attachment.uploader_id").Where("attachment.dataset_id = ?", datasetId).Find(&r); err != nil {
		return nil, err
	}
	return r, nil
}

+ 122
- 21
models/cloudbrain.go View File

@@ -4,6 +4,7 @@ import (
"encoding/json"
"errors"
"fmt"
"path"
"strconv"
"strings"
"time"
@@ -101,7 +102,8 @@ const (
ModelArtsTrainJobCheckRunningCompleted ModelArtsJobStatus = "CHECK_RUNNING_COMPLETED" //审核作业已经完成
ModelArtsTrainJobCheckFailed ModelArtsJobStatus = "CHECK_FAILED" //审核作业失败

DURATION_STR_ZERO = "00:00:00"
DURATION_STR_ZERO = "00:00:00"
CloudbrainKeyDuration = 24 * time.Hour

//grampus
GrampusStatusPending = "pending"
@@ -187,6 +189,7 @@ type Cloudbrain struct {
ModelName string //模型名称
ModelVersion string //模型版本
CkptName string //权重文件名称
PreTrainModelUrl string //预训练模型地址
ResultUrl string //推理结果的obs路径

User *User `xorm:"-"`
@@ -199,6 +202,51 @@ type Cloudbrain struct {
Spec *Specification `xorm:"-"`
}

// CloudbrainShow is the display view of a cloudbrain task used when
// rendering task information outside the models layer.
type CloudbrainShow struct {
	ID               int64
	JobID            string
	RepoFullName     string // owner/name of the repo the task belongs to
	Type             int
	JobType          string
	DisplayJobName   string
	Duration         string // formatted train-job duration, e.g. "HH:MM:SS"
	ResourceSpec     *Specification
	ComputeResource  string
	AiCenter         string
	WorkServerNumber int // worker count, normalized to >= 1 by ToShow
}

// CloudbrainShow4Action is a minimal cloudbrain view embedded into activity
// feed actions (see Action.ToShow).
type CloudbrainShow4Action struct {
	ID              int64
	JobID           string
	Type            int
	JobType         string
	DisplayJobName  string
	ComputeResource string
}

// ToShow converts a Cloudbrain task into its CloudbrainShow view model.
// Worker counts below 1 are normalized to 1.
func (task *Cloudbrain) ToShow() *CloudbrainShow {
	workers := task.WorkServerNumber
	if workers < 2 {
		workers = 1
	}
	show := &CloudbrainShow{
		ID:               task.ID,
		JobID:            task.JobID,
		JobType:          task.JobType,
		Type:             task.Type,
		DisplayJobName:   task.DisplayJobName,
		Duration:         task.TrainJobDuration,
		ResourceSpec:     task.Spec,
		ComputeResource:  task.ComputeResource,
		WorkServerNumber: workers,
	}
	// Repo is lazily loaded; leave RepoFullName empty when absent.
	if task.Repo != nil {
		show.RepoFullName = task.Repo.FullName()
	}
	return show
}

func (task *Cloudbrain) ComputeAndSetDuration() {
var d int64
if task.StartTime == 0 {
@@ -239,7 +287,7 @@ func (task *Cloudbrain) IsRunning() bool {
}

func ConvertDurationToStr(duration int64) string {
if duration == 0 {
if duration <= 0 {
return DURATION_STR_ZERO
}
return util.AddZero(duration/3600) + ":" + util.AddZero(duration%3600/60) + ":" + util.AddZero(duration%60)
@@ -596,11 +644,23 @@ type ResourceSpecs struct {
}

type ResourceSpec struct {
Id int `json:"id"`
CpuNum int `json:"cpu"`
GpuNum int `json:"gpu"`
MemMiB int `json:"memMiB"`
ShareMemMiB int `json:"shareMemMiB"`
Id int `json:"id"`
CpuNum int `json:"cpu"`
GpuNum int `json:"gpu"`
MemMiB int `json:"memMiB"`
ShareMemMiB int `json:"shareMemMiB"`
UnitPrice int64 `json:"unitPrice"`
}

type FlavorInfos struct {
FlavorInfo []*FlavorInfo `json:"flavor_info"`
}

type FlavorInfo struct {
Id int `json:"id"`
Value string `json:"value"`
Desc string `json:"desc"`
UnitPrice int64 `json:"unitPrice"`
}

type SpecialPools struct {
@@ -1420,14 +1480,23 @@ type GrampusStopJobResponse struct {
}

type GrampusTasks struct {
Command string `json:"command"`
Name string `json:"name"`
ImageId string `json:"imageId"`
ResourceSpecId string `json:"resourceSpecId"`
ImageUrl string `json:"imageUrl"`
CenterID []string `json:"centerID"`
CenterName []string `json:"centerName"`
ReplicaNum int `json:"replicaNum"`
Command string `json:"command"`
Name string `json:"name"`
ImageId string `json:"imageId"`
ResourceSpecId string `json:"resourceSpecId"`
ImageUrl string `json:"imageUrl"`
CenterID []string `json:"centerID"`
CenterName []string `json:"centerName"`
ReplicaNum int `json:"replicaNum"`
Datasets []GrampusDataset `json:"datasets"`
Models []GrampusDataset `json:"models"`
}

// GrampusDataset describes one object-storage location (bucket, endpoint,
// object key) passed to a grampus job; used for both the Datasets and
// Models fields of GrampusTasks.
type GrampusDataset struct {
	Name      string `json:"name"`
	Bucket    string `json:"bucket"`
	EndPoint  string `json:"endPoint"`
	ObjectKey string `json:"objectKey"`
}

type CreateGrampusJobRequest struct {
@@ -2220,12 +2289,34 @@ func CloudbrainAllStatic(opts *CloudbrainsOptions) ([]*CloudbrainInfo, int64, er
return cloudbrains, count, nil
}

// GetStartedCloudbrainTaskByUpdatedUnix returns all cloudbrain tasks —
// including soft-deleted ones (Unscoped) — whose updated_unix falls within
// [startTime, endTime] and that have actually started (start_time > 0).
func GetStartedCloudbrainTaskByUpdatedUnix(startTime, endTime time.Time) ([]Cloudbrain, error) {
	r := make([]Cloudbrain, 0)
	err := x.Where("updated_unix >= ? and updated_unix <= ? and start_time > 0", startTime.Unix(), endTime.Unix()).Unscoped().Find(&r)
	if err != nil {
		return nil, err
	}
	return r, nil
}

// GetCloudbrainByIds fetches cloudbrain tasks by primary key, including
// soft-deleted rows. An empty id list returns (nil, nil).
func GetCloudbrainByIds(ids []int64) ([]*Cloudbrain, error) {
	if len(ids) == 0 {
		return nil, nil
	}
	result := make([]*Cloudbrain, 0)
	if err := x.In("id", ids).Unscoped().Find(&result); err != nil {
		return nil, err
	}
	return result, nil
}

// DatasetInfo describes one resolved dataset attachment for a cloudbrain
// task: where it lives on local/object storage and its display names.
type DatasetInfo struct {
	DataLocalPath string // storage path the task mounts/reads
	Name          string // file name without archive extension
	FullName      string // original attachment name
}

func GetDatasetInfo(uuidStr string) (map[string]DatasetInfo, string, error) {
func GetDatasetInfo(uuidStr string, grampusType ...string) (map[string]DatasetInfo, string, error) {
var datasetNames string
uuids := strings.Split(uuidStr, ";")
if len(uuids) > setting.MaxDatasetNum {
@@ -2258,16 +2349,26 @@ func GetDatasetInfo(uuidStr string) (map[string]DatasetInfo, string, error) {
return nil, datasetNames, errors.New("the dataset name is same")
}
}
var dataLocalPath string
if len(grampusType) > 0 {
if grampusType[0] == GPU {
dataLocalPath = setting.Attachment.Minio.BasePath + path.Join(attach.UUID[0:1], attach.UUID[1:2]) + "/" + attach.UUID
} else {
dataLocalPath = setting.BasePath + path.Join(attach.UUID[0:1], attach.UUID[1:2]) + "/" + attach.UUID + "/"
}

dataLocalPath := setting.Attachment.Minio.RealPath +
setting.Attachment.Minio.Bucket + "/" +
setting.Attachment.Minio.BasePath +
AttachmentRelativePath(attach.UUID) +
attach.UUID
} else {
dataLocalPath = setting.Attachment.Minio.RealPath +
setting.Attachment.Minio.Bucket + "/" +
setting.Attachment.Minio.BasePath +
AttachmentRelativePath(attach.UUID) +
attach.UUID
}

datasetInfos[attach.UUID] = DatasetInfo{
DataLocalPath: dataLocalPath,
Name: fileName,
FullName: attach.Name,
}
if i == 0 {
datasetNames = attach.Name


+ 27
- 0
models/cloudbrain_spec.go View File

@@ -72,6 +72,8 @@ func NewCloudBrainSpec(cloudbrainId int64, s Specification) CloudbrainSpec {
}
}

var StatusChangeChan = make(chan *Cloudbrain, 50)

func InsertCloudbrainSpec(c CloudbrainSpec) (int64, error) {
return x.Insert(&c)
}
@@ -107,3 +109,28 @@ func CountNoSpecHistoricTask() (int64, error) {
}
return n, nil
}

// GetResourceSpecMapByCloudbrainIDs loads the CloudbrainSpec rows for the
// given cloudbrain ids and returns them as Specification view objects keyed
// by cloudbrain id. IDs with no stored spec are simply absent from the map.
func GetResourceSpecMapByCloudbrainIDs(ids []int64) (map[int64]*Specification, error) {
	specs := make([]*CloudbrainSpec, 0)
	if err := x.In("cloudbrain_id", ids).Find(&specs); err != nil {
		return nil, err
	}
	r := make(map[int64]*Specification, len(ids))
	for _, s := range specs {
		r[s.CloudbrainID] = s.ConvertToSpecification()
	}
	return r, nil
}

// GetCloudbrainTaskUnitPrice returns the point price per billing unit for a
// task: the stored spec's unit price multiplied by the worker count
// (normalized to at least 1).
func GetCloudbrainTaskUnitPrice(task Cloudbrain) (int, error) {
	s, err := GetCloudbrainSpecByID(task.ID)
	if err != nil {
		return 0, err
	}
	var n = 1
	if task.WorkServerNumber > 1 {
		n = task.WorkServerNumber
	}
	return s.UnitPrice * n, nil
}

+ 1
- 0
models/cloudbrain_static.go View File

@@ -34,6 +34,7 @@ type TaskDetail struct {
CardDuration string `json:"CardDuration"`
AiCenter string `json:"AiCenter"`
FlavorName string `json:"FlavorName"`
Spec *Specification `json:"Spec"`
}

func GetTodayCreatorCount(beginTime time.Time, endTime time.Time) (int64, error) {


+ 24
- 0
models/error.go View File

@@ -2012,3 +2012,27 @@ func IsErrTagNotExist(err error) bool {
_, ok := err.(ErrTagNotExist)
return ok
}

// ErrRecordNotExist is returned when a query matched no rows.
type ErrRecordNotExist struct {
}

// IsErrRecordNotExist reports whether err is an ErrRecordNotExist.
func IsErrRecordNotExist(err error) bool {
	_, ok := err.(ErrRecordNotExist)
	return ok
}

func (err ErrRecordNotExist) Error() string {
	// Constant message: fmt.Sprintf with no format verbs is unnecessary
	// (staticcheck S1039).
	return "record not exist in database"
}

// ErrInsufficientPointsBalance is returned when a point account's balance
// cannot cover a requested deduction.
type ErrInsufficientPointsBalance struct {
}

// IsErrInsufficientPointsBalance reports whether err is an
// ErrInsufficientPointsBalance.
func IsErrInsufficientPointsBalance(err error) bool {
	_, ok := err.(ErrInsufficientPointsBalance)
	return ok
}

func (err ErrInsufficientPointsBalance) Error() string {
	// Constant message: fmt.Sprintf with no format verbs is unnecessary
	// (staticcheck S1039).
	return "Insufficient points balance"
}

+ 22
- 0
models/helper.go View File

@@ -11,6 +11,13 @@ func keysInt64(m map[int64]struct{}) []int64 {
}
return keys
}
// keysString collects the keys of a string set into a freshly allocated
// slice. Order is unspecified (Go map iteration order is random).
func keysString(m map[string]struct{}) []string {
	result := make([]string, 0, len(m))
	for key := range m {
		result = append(result, key)
	}
	return result
}

func valuesRepository(m map[int64]*Repository) []*Repository {
var values = make([]*Repository, 0, len(m))
@@ -27,3 +34,18 @@ func valuesUser(m map[int64]*User) []*User {
}
return values
}

// valuesComment collects the values of a comment map into a slice
// (unspecified order).
func valuesComment(m map[int64]*Comment) []*Comment {
	result := make([]*Comment, 0, len(m))
	for _, comment := range m {
		result = append(result, comment)
	}
	return result
}

// valuesCloudbrain collects the values of a cloudbrain map into a slice
// (unspecified order).
func valuesCloudbrain(m map[int64]*Cloudbrain) []*Cloudbrain {
	result := make([]*Cloudbrain, 0, len(m))
	for _, task := range m {
		result = append(result, task)
	}
	return result
}

+ 184
- 0
models/limit_config.go View File

@@ -0,0 +1,184 @@
package models

import (
"code.gitea.io/gitea/modules/timeutil"
"xorm.io/builder"
)

// LimitType classifies what a limit configuration restricts.
type LimitType string

const (
	LimitTypeTask        LimitType = "TASK"
	LimitTypeRewardPoint LimitType = "REWARD_POINT"
)

// Name returns the canonical string for a known LimitType, or "" for an
// unrecognized value.
func (l LimitType) Name() string {
	// The declared constants equal their canonical names, so echoing the
	// underlying string for known values is equivalent to the case table.
	if l == LimitTypeTask || l == LimitTypeRewardPoint {
		return string(l)
	}
	return ""
}

// LimitScope states whether a limit applies globally or per user.
type LimitScope string

const (
	LimitScopeAllUsers   LimitScope = "ALL_USERS"
	LimitScopeSingleUser LimitScope = "SINGLE_USER"
)

// Name returns the canonical string for a known LimitScope, or "" for an
// unrecognized value.
func (l LimitScope) Name() string {
	// Known constants equal their canonical names, so echoing the
	// underlying string is equivalent to the case table.
	if l == LimitScopeAllUsers || l == LimitScopeSingleUser {
		return string(l)
	}
	return ""
}

// LimiterRejectPolicy controls what happens when a limiter would be exceeded.
type LimiterRejectPolicy string

const (
	JustReject    LimiterRejectPolicy = "JUST_REJECT"    // refuse the request outright
	PermittedOnce LimiterRejectPolicy = "PERMITTED_ONCE" // allow one final request over the limit
	// NOTE(review): value "FillUp" breaks the SCREAMING_SNAKE convention of
	// the other two — confirm whether consumers compare against this exact
	// string before normalizing.
	FillUp LimiterRejectPolicy = "FillUp"
)

// LimitConfig is a persisted limiter rule: how many units (LimitNum) of a
// given LimitType/LimitCode a scope may consume per RefreshRate window.
// Rows are soft-deleted (the `deleted` tag on DeletedAt).
type LimitConfig struct {
	ID          int64 `xorm:"pk autoincr"`
	Title       string
	RefreshRate string `xorm:"NOT NULL"` // refresh period expression
	Scope       string `xorm:"NOT NULL"` // a LimitScope name
	LimitNum    int64  `xorm:"NOT NULL"`
	LimitCode   string // optional sub-code; empty means the type's default rule
	LimitType   string `xorm:"NOT NULL"` // a LimitType name
	RelatedId   int64  `xorm:"INDEX"`
	CreatorId   int64  `xorm:"NOT NULL"`
	CreatorName string
	DeleterId   int64
	DeleterName string
	CreatedUnix timeutil.TimeStamp `xorm:"created"`
	DeletedAt   timeutil.TimeStamp `xorm:"deleted"` // soft-delete marker
}

// LimitConfigQueryOpts are the filters available when querying limit
// configurations.
type LimitConfigQueryOpts struct {
	RefreshRate string
	Scope       LimitScope
	LimitCode   string
	LimitType   LimitType
}

// LimitConfigVO is the view object for a LimitConfig, exposing the creator
// by display name and omitting deletion bookkeeping.
type LimitConfigVO struct {
	ID          int64
	Title       string
	RefreshRate string
	Scope       string
	LimitNum    int64
	LimitCode   string
	LimitType   string
	Creator     string
	CreatedUnix timeutil.TimeStamp
}

// ToLimitConfigVO maps a LimitConfig entity to its view object, exposing
// the creator by name and dropping deletion bookkeeping fields.
func (l *LimitConfig) ToLimitConfigVO() *LimitConfigVO {
	vo := &LimitConfigVO{}
	vo.ID = l.ID
	vo.Title = l.Title
	vo.RefreshRate = l.RefreshRate
	vo.Scope = l.Scope
	vo.LimitNum = l.LimitNum
	vo.LimitCode = l.LimitCode
	vo.LimitType = l.LimitType
	vo.Creator = l.CreatorName
	vo.CreatedUnix = l.CreatedUnix
	return vo
}

// GetLimitConfigByLimitType returns all (non-deleted) limit configurations
// of the given type. Returns ErrRecordNotExist when none are configured.
func GetLimitConfigByLimitType(limitType LimitType) ([]LimitConfig, error) {
	r := make([]LimitConfig, 0)
	err := x.Where(" limit_type = ?", limitType.Name()).Find(&r)
	if err != nil {
		return nil, err
	} else if len(r) == 0 {
		return nil, ErrRecordNotExist{}
	}
	return r, nil
}

// GetLimitersByRelatedIdWithDeleted returns all limit configurations of the
// given type, including soft-deleted rows (Unscoped). Returns
// ErrRecordNotExist when none match.
// Bug fix: the original condition was `Where(" = ?", ...)` with no column
// name, which is invalid SQL; the intended column is limit_type.
// NOTE(review): the function name mentions RelatedId but the filter is by
// limit type — confirm the intended contract with callers.
func GetLimitersByRelatedIdWithDeleted(limitType LimitType) ([]LimitConfig, error) {
	r := make([]LimitConfig, 0)
	err := x.Unscoped().Where("limit_type = ?", limitType.Name()).Find(&r)
	if err != nil {
		return nil, err
	} else if len(r) == 0 {
		return nil, ErrRecordNotExist{}
	}
	return r, nil
}

// AddLimitConfig replaces the limit configuration for (LimitType, Scope,
// LimitCode): any existing rule with the same key is deleted and the new
// rule inserted, atomically in one transaction.
func AddLimitConfig(l *LimitConfig) error {
	sess := x.NewSession()
	defer sess.Close()
	// Bug fix: the original never called Begin(), so the session ran in
	// auto-commit mode and Rollback()/Commit() were no-ops — the delete and
	// insert were not actually atomic.
	if err := sess.Begin(); err != nil {
		return err
	}

	// Delete the old limit config with the same type/scope/code.
	cond := builder.NewCond()
	cond = cond.And(builder.Eq{"limit_type": l.LimitType})
	cond = cond.And(builder.Eq{"scope": l.Scope})
	if l.LimitCode == "" {
		// Treat NULL and empty string as the same "no code" rule.
		subCond := builder.NewCond()
		subCond = subCond.Or(builder.IsNull{"limit_code"})
		subCond = subCond.Or(builder.Eq{"limit_code": ""})
		cond = cond.And(subCond)
	} else {
		cond = cond.And(builder.Eq{"limit_code": l.LimitCode})
	}
	if _, err := sess.Where(cond).Delete(&LimitConfig{}); err != nil {
		sess.Rollback()
		return err
	}

	// Add the new config.
	if _, err := sess.Insert(l); err != nil {
		sess.Rollback()
		return err
	}

	// Propagate the commit error instead of silently dropping it.
	return sess.Commit()
}

// DeleteLimitConfig records who deleted the config and then soft-deletes it
// (via the `deleted` tag on LimitConfig.DeletedAt), in one transaction.
func DeleteLimitConfig(config LimitConfig, deleterId int64, deleterName string) error {
	sess := x.NewSession()
	defer sess.Close()
	// Bug fix: the original created a session but issued both statements on
	// the global engine `x` and never called Begin(), so Rollback() could
	// not undo anything and the two writes were not atomic.
	if err := sess.Begin(); err != nil {
		return err
	}
	if _, err := sess.ID(config.ID).Update(&LimitConfig{DeleterName: deleterName, DeleterId: deleterId}); err != nil {
		sess.Rollback()
		return err
	}
	if _, err := sess.ID(config.ID).Delete(&LimitConfig{}); err != nil {
		sess.Rollback()
		return err
	}
	return sess.Commit()
}

// GetLimitConfigById fetches a limit configuration by primary key.
// Returns (nil, nil) when no row matches.
func GetLimitConfigById(id int64) (*LimitConfig, error) {
	config := &LimitConfig{}
	found, err := x.ID(id).Get(config)
	if err != nil {
		return nil, err
	}
	if !found {
		return nil, nil
	}
	return config, nil
}

+ 9
- 0
models/models.go View File

@@ -144,6 +144,14 @@ func init() {
new(WechatBindLog),
new(OrgStatistic),
new(SearchRecord),
new(TaskConfig),
new(TaskAccomplishLog),
new(RewardOperateRecord),
new(LimitConfig),
new(RewardPeriodicTask),
new(PointAccountLog),
new(PointAccount),
new(RewardAdminLog),
new(AiModelConvert),
new(ResourceQueue),
new(ResourceSpecification),
@@ -170,6 +178,7 @@ func init() {
new(UserLoginLog),
new(UserMetrics),
new(UserAnalysisPara),
new(Invitation),
)

gonicNames := []string{"SSL", "UID"}


+ 142
- 0
models/point_account.go View File

@@ -0,0 +1,142 @@
package models

import (
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/timeutil"
)

// PointAccountStatus enumerates the lifecycle states of a point account.
// NOTE(review): the named type is declared but the constants below are
// typed as plain int — confirm whether they were meant to be
// PointAccountStatus values.
type PointAccountStatus int

// Possible PointAccountStatus types.
const (
	PointAccountNormal int = iota + 1 // 1
	PointAccountFreeze // 2
	PointAccountDeleted // 3
)

// PointAccount is a user's reward-point balance, with running totals and an
// optimistic-lock Version that the Increase/Decrease SQL bumps on each write.
type PointAccount struct {
	ID            int64  `xorm:"pk autoincr"`
	AccountCode   string `xorm:"INDEX NOT NULL"` // stable external identifier used by account logs
	Balance       int64  `xorm:"NOT NULL DEFAULT 0"`
	TotalEarned   int64  `xorm:"NOT NULL DEFAULT 0"`
	TotalConsumed int64  `xorm:"NOT NULL DEFAULT 0"`
	UserId        int64  `xorm:"INDEX NOT NULL"`
	Status        int    `xorm:"NOT NULL"` // see PointAccountNormal/Freeze/Deleted
	Version       int64  `xorm:"NOT NULL"` // incremented by every balance-changing SQL update
	CreatedUnix   timeutil.TimeStamp `xorm:"INDEX created"`
	UpdatedUnix   timeutil.TimeStamp `xorm:"INDEX updated"`
}

// Increase adds amount to the account's balance and total earned, bumps the
// version, and writes a PointAccountLog row — atomically in one transaction.
//
// NOTE(review): BalanceBefore/BalanceAfter come from the in-memory account
// snapshot, not the row the SQL just updated, so they can be stale under
// concurrent updates — confirm whether that is acceptable.
func (account *PointAccount) Increase(amount int64, sourceId string) error {
	sess := x.NewSession()
	defer sess.Close()
	// Bug fix: the original never called Begin(), so the session was in
	// auto-commit mode and Rollback()/Commit() were no-ops — the balance
	// update and its log row were not actually atomic.
	if err := sess.Begin(); err != nil {
		return err
	}
	sql := "update point_account set balance = balance + ?,total_earned = total_earned + ? ,version = version + 1 where account_code = ? "
	if _, err := sess.Exec(sql, amount, amount, account.AccountCode); err != nil {
		sess.Rollback()
		return err
	}
	accountLog := &PointAccountLog{
		AccountCode:    account.AccountCode,
		UserId:         account.UserId,
		Type:           IncreaseAccountBalance,
		SourceId:       sourceId,
		PointsAmount:   amount,
		BalanceBefore:  account.Balance,
		BalanceAfter:   account.Balance + amount,
		AccountVersion: account.Version,
	}
	if _, err := sess.Insert(accountLog); err != nil {
		sess.Rollback()
		return err
	}
	return sess.Commit()
}

// Decrease subtracts amount from the account's balance, adds it to total
// consumed, bumps the version, and writes a PointAccountLog row —
// atomically in one transaction.
//
// NOTE(review): no balance-sufficiency check happens here; callers are
// expected to enforce it (cf. ErrInsufficientPointsBalance) — confirm.
func (account *PointAccount) Decrease(amount int64, sourceId string) error {
	sess := x.NewSession()
	defer sess.Close()
	// Bug fix: the original never called Begin(), so Rollback()/Commit()
	// were no-ops and the update plus its log row were not atomic.
	if err := sess.Begin(); err != nil {
		return err
	}
	sql := "update point_account set balance = balance - ?,total_consumed = total_consumed + ? ,version = version + 1 where account_code = ? "
	if _, err := sess.Exec(sql, amount, amount, account.AccountCode); err != nil {
		sess.Rollback()
		return err
	}
	accountLog := &PointAccountLog{
		AccountCode:    account.AccountCode,
		UserId:         account.UserId,
		Type:           DecreaseAccountBalance,
		SourceId:       sourceId,
		PointsAmount:   amount,
		BalanceBefore:  account.Balance,
		BalanceAfter:   account.Balance - amount,
		AccountVersion: account.Version,
	}
	if _, err := sess.Insert(accountLog); err != nil {
		sess.Rollback()
		return err
	}
	return sess.Commit()
}

// GetAccountByUserId looks up the point account owned by userId.
// Returns ErrRecordNotExist when the user has no account yet.
func GetAccountByUserId(userId int64) (*PointAccount, error) {
	account := &PointAccount{}
	found, err := x.Where("user_id = ?", userId).Get(account)
	if err != nil {
		return nil, err
	}
	if !found {
		return nil, ErrRecordNotExist{}
	}
	return account, nil
}

// InsertAccount persists a new point account and returns the number of
// inserted rows.
func InsertAccount(tl *PointAccount) (int64, error) {
	return x.Insert(tl)
}

// SearchPointAccountOpts are the paging/filter options for the admin point
// account search.
type SearchPointAccountOpts struct {
	ListOptions
	Keyword string // matches against user identity fields
}

// SearchPointAccountResponse is one page of point-account search results.
type SearchPointAccountResponse struct {
	Records  []*UserPointAccount
	PageSize int
	Page     int
	Total    int64
}

// UserPointAccount is a user row joined with its point-account totals for
// display. It maps onto the "user" table (see TableName).
type UserPointAccount struct {
	UserId        int64
	UserName      string
	Email         string
	Balance       int64
	TotalEarned   int64
	TotalConsumed int64
}

// TableName makes xorm query this view against the "user" table.
func (UserPointAccount) TableName() string {
	return "user"
}

// GetPointAccountMapByUserIds loads the point accounts for the given user
// ids and indexes them by user id. Users without an account are simply
// absent from the map; an empty input yields an empty map.
func GetPointAccountMapByUserIds(userIds []int64) (map[int64]*PointAccount, error) {
	result := make(map[int64]*PointAccount, len(userIds))
	if len(userIds) == 0 {
		return result, nil
	}
	accounts := make([]*PointAccount, 0)
	if err := x.In("user_id", userIds).Find(&accounts); err != nil {
		log.Error("GetPointAccountMapByUserIds error.%v", err)
		return nil, err
	}
	for _, account := range accounts {
		result[account.UserId] = account
	}
	return result, nil
}

+ 21
- 0
models/point_account_log.go View File

@@ -0,0 +1,21 @@
package models

import "code.gitea.io/gitea/modules/timeutil"

// Direction markers for PointAccountLog.Type.
const (
	IncreaseAccountBalance = "increase"
	DecreaseAccountBalance = "decrease"
)

// PointAccountLog is an append-only audit record of one balance change on a
// point account, written in the same transaction as the change itself.
type PointAccountLog struct {
	ID             int64  `xorm:"pk autoincr"`
	AccountCode    string `xorm:"INDEX NOT NULL"`
	UserId         int64  `xorm:"INDEX NOT NULL"`
	Type           string `xorm:"NOT NULL"` // IncreaseAccountBalance or DecreaseAccountBalance
	SourceId       string `xorm:"INDEX NOT NULL"` // identifier of the operation that caused the change
	PointsAmount   int64  `xorm:"NOT NULL"`
	BalanceBefore  int64  `xorm:"NOT NULL"` // snapshot taken by the caller before the change
	BalanceAfter   int64  `xorm:"NOT NULL"`
	AccountVersion int64  `xorm:"NOT NULL"` // account.Version at the time of the change
	CreatedUnix    timeutil.TimeStamp `xorm:"INDEX created"`
}

+ 6
- 0
models/repo.go View File

@@ -237,6 +237,12 @@ type Repository struct {
LowerAlias string `xorm:"INDEX"`
}

// RepositoryShow is a minimal repository projection for display purposes.
type RepositoryShow struct {
	Name     string
	RepoType RepoType
	Alias    string
}

// SanitizedOriginalURL returns a sanitized OriginalURL
func (repo *Repository) SanitizedOriginalURL() string {
if repo.OriginalURL == "" {


+ 19
- 2
models/repo_watch.go View File

@@ -25,6 +25,7 @@ const (
)

// ActionChan carries non-private actions to downstream consumers.
var ActionChan = make(chan *Action, 200)

// ActionChan4Task receives a copy of every inserted action so task handling
// can react to it (see the send in notifyWatchers); buffered to decouple the
// producer from consumers.
var ActionChan4Task = make(chan Action, 200)

// Watch is connection request for receiving repository notification.
type Watch struct {
@@ -182,6 +183,7 @@ func notifyWatchers(e Engine, actions ...*Action) error {
var permCode []bool
var permIssue []bool
var permPR []bool
var permDataset []bool

for _, act := range actions {
repoChanged := repo == nil || repo.ID != act.RepoID
@@ -199,6 +201,14 @@ func notifyWatchers(e Engine, actions ...*Action) error {
if _, err = e.InsertOne(act); err != nil {
return fmt.Errorf("insert new actioner: %v", err)
}
// After InsertOne(act),the act has ID
// Send the act to task chan
ActionChan4Task <- *act

// If it has nothing to do with repo, return directly
if act.Repo == nil && act.RepoID == 0 {
return nil
}

if repoChanged {
act.loadRepo()
@@ -225,12 +235,14 @@ func notifyWatchers(e Engine, actions ...*Action) error {
permCode = make([]bool, len(watchers))
permIssue = make([]bool, len(watchers))
permPR = make([]bool, len(watchers))
permDataset = make([]bool, len(watchers))
for i, watcher := range watchers {
user, err := getUserByID(e, watcher.UserID)
if err != nil {
permCode[i] = false
permIssue[i] = false
permPR[i] = false
permDataset[i] = false
continue
}
perm, err := getUserRepoPermission(e, repo, user)
@@ -238,11 +250,13 @@ func notifyWatchers(e Engine, actions ...*Action) error {
permCode[i] = false
permIssue[i] = false
permPR[i] = false
permDataset[i] = false
continue
}
permCode[i] = perm.CanRead(UnitTypeCode)
permIssue[i] = perm.CanRead(UnitTypeIssues)
permPR[i] = perm.CanRead(UnitTypePullRequests)
permDataset[i] = perm.CanRead(UnitTypeDatasets)
}
}

@@ -267,6 +281,10 @@ func notifyWatchers(e Engine, actions ...*Action) error {
if !permPR[i] {
continue
}
case ActionDatasetRecommended:
if !permDataset[i] {
continue
}
}

if _, err = e.InsertOne(act); err != nil {
@@ -279,7 +297,6 @@ func notifyWatchers(e Engine, actions ...*Action) error {

// NotifyWatchers creates batch of actions for every watcher.
func NotifyWatchers(actions ...*Action) error {

error := notifyWatchers(x, actions...)
producer(actions...)
return error
@@ -287,7 +304,7 @@ func NotifyWatchers(actions ...*Action) error {

func producer(actions ...*Action) {
for _, action := range actions {
if !action.IsPrivate{
if !action.IsPrivate {
ActionChan <- action
}
}


+ 79
- 0
models/reward_admin_log.go View File

@@ -0,0 +1,79 @@
package models

import (
"code.gitea.io/gitea/modules/timeutil"
)

// Processing states of an admin reward operation log entry.
const (
	RewardAdminLogProcessing = 1
	RewardAdminLogSuccess    = 2
	RewardAdminLogFailed     = 3
)

// RewardAdminLog records a manual point grant/deduction performed by an
// administrator; LogId links it to the reward operate record it produced
// (see GetRewardAdminLogByLogIds / loadAdminLog).
type RewardAdminLog struct {
	ID           int64  `xorm:"pk autoincr"`
	LogId        string `xorm:"INDEX NOT NULL"`
	Amount       int64  `xorm:"NOT NULL"`
	RewardType   string
	Remark       string
	Status       int // one of the RewardAdminLog* constants above
	TargetUserId int64 `xorm:"INDEX NOT NULL"`
	CreatorId    int64 `xorm:"NOT NULL"`
	CreatorName  string
	CreatedUnix  timeutil.TimeStamp `xorm:"INDEX created"`
}

// ToShow converts the log into its display form, exposing only the operator
// name.
func (r *RewardAdminLog) ToShow() *RewardAdminLogShow {
	return &RewardAdminLogShow{
		CreatorName: r.CreatorName,
	}
}

// RewardAdminLogShow is the user-facing projection of RewardAdminLog.
type RewardAdminLogShow struct {
	CreatorName string
}

// AdminLogAndUser is the join row of a reward admin log with its creator.
type AdminLogAndUser struct {
	AdminRewardAdminLog RewardAdminLog `xorm:"extends"`
	User                User           `xorm:"extends"`
}

// getRewardAdminLog fetches the admin log matching the non-zero fields of ra,
// returning ErrRecordNotExist when no row matches.
func getRewardAdminLog(ra *RewardAdminLog) (*RewardAdminLog, error) {
	found, err := x.Get(ra)
	if err != nil {
		return nil, err
	}
	if !found {
		return nil, ErrRecordNotExist{}
	}
	return ra, nil
}

// InsertRewardAdminLog persists a new admin reward log entry.
func InsertRewardAdminLog(adminLog *RewardAdminLog) (int64, error) {
	return x.Insert(adminLog)
}

// UpdateRewardAdminLogStatus transitions the admin log identified by logId
// from oldStatus to newStatus (compare-and-set: rows not in oldStatus are
// untouched).
func UpdateRewardAdminLogStatus(logId string, oldStatus, newStatus int) error {
	// Force the status column into the UPDATE via Cols: xorm's struct-based
	// Update skips zero-value fields, so without it a newStatus of 0 would be
	// silently dropped from the statement.
	_, err := x.Cols("status").Where("log_id = ? and status = ?", logId, oldStatus).Update(&RewardAdminLog{Status: newStatus})
	return err
}

// GetRewardAdminLogByLogIds loads the admin logs for the given log ids, with
// CreatorName resolved from the joined user row. Returns nil for empty input.
func GetRewardAdminLogByLogIds(logIds []string) ([]*RewardAdminLog, error) {
	if len(logIds) == 0 {
		return nil, nil
	}
	rows := make([]*AdminLogAndUser, 0)
	if err := x.Table("reward_admin_log").
		Join("LEFT", "user", "reward_admin_log.creator_id = public.user.id").
		In("reward_admin_log.log_id", logIds).
		Find(&rows); err != nil {
		return nil, err
	}
	result := make([]*RewardAdminLog, len(rows))
	for i, row := range rows {
		adminLog := row.AdminRewardAdminLog
		adminLog.CreatorName = row.User.Name
		result[i] = &adminLog
	}
	return result, nil
}

+ 485
- 0
models/reward_operate_record.go View File

@@ -0,0 +1,485 @@
package models

import (
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/timeutil"
"fmt"
"strconv"
"strings"
"xorm.io/builder"
)

// SourceType identifies the kind of business event that produced a reward.
type SourceType string

const (
	SourceTypeAccomplishTask    SourceType = "ACCOMPLISH_TASK"
	SourceTypeAdminOperate      SourceType = "ADMIN_OPERATE"
	SourceTypeRunCloudbrainTask SourceType = "RUN_CLOUDBRAIN_TASK"
)

// Name returns the canonical string form of a known SourceType and the empty
// string for any unrecognized value.
func (r SourceType) Name() string {
	switch r {
	case SourceTypeAccomplishTask, SourceTypeAdminOperate, SourceTypeRunCloudbrainTask:
		// each known constant's value is exactly its name
		return string(r)
	}
	return ""
}

// RewardType is the kind of reward granted; currently only points exist.
type RewardType string

const (
	RewardTypePoint RewardType = "POINT"
)

// Name returns the canonical string form of a known RewardType, "" otherwise.
func (r RewardType) Name() string {
	if r == RewardTypePoint {
		return string(r)
	}
	return ""
}
// Show returns the human-readable (Chinese) label of the reward type, or ""
// for unknown values.
func (r RewardType) Show() string {
	if r == RewardTypePoint {
		return "积分"
	}
	return ""
}
// GetRewardTypeInstance parses a string into a RewardType; unknown strings
// map to the empty RewardType.
func GetRewardTypeInstance(s string) RewardType {
	if s == RewardTypePoint.Name() {
		return RewardTypePoint
	}
	return ""
}

// RewardOperateType is the direction of a reward operation; see the
// OperateType* constants below.
type RewardOperateType string

// Name returns the canonical string form of a known direction, "" otherwise
// (note OperateTypeNull also yields "").
func (r RewardOperateType) Name() string {
	switch r {
	case OperateTypeIncrease, OperateTypeDecrease:
		// both known constants carry exactly their name as value
		return string(r)
	}
	return ""
}
// Show returns the human-readable (Chinese) label of the operation
// direction, or "" for unknown values.
func (r RewardOperateType) Show() string {
	if r == OperateTypeIncrease {
		return "奖励"
	}
	if r == OperateTypeDecrease {
		return "扣减"
	}
	return ""
}

// GetRewardOperateTypeInstance parses a string into a RewardOperateType;
// unknown strings map to the empty value.
func GetRewardOperateTypeInstance(s string) RewardOperateType {
	for _, t := range []RewardOperateType{OperateTypeIncrease, OperateTypeDecrease} {
		if s == t.Name() {
			return t
		}
	}
	return ""
}

// Reward operation directions. OperateTypeNull is a sentinel meaning "no
// direction filter" in list queries (see RewardRecordListOpts.toCond).
const (
	OperateTypeIncrease RewardOperateType = "INCREASE"
	OperateTypeDecrease RewardOperateType = "DECREASE"
	OperateTypeNull     RewardOperateType = "NIL"
)

// Lifecycle states of a reward operate record.
const (
	OperateStatusOperating = "OPERATING"
	OperateStatusSucceeded = "SUCCEEDED"
	OperateStatusFailed    = "FAILED"
)

// Semicolon separates concatenated remark fragments (see AppendRemark).
const Semicolon = ";"

// RewardOperateOrderBy is a raw ORDER BY fragment for reward record queries.
type RewardOperateOrderBy string

const (
	RewardOrderByIDDesc RewardOperateOrderBy = "reward_operate_record.id desc"
)

// RewardRecordList and RewardRecordShowList are convenience slice types;
// RewardRecordShowList carries the load* enrichment helpers below.
type RewardRecordList []*RewardOperateRecord
type RewardRecordShowList []*RewardOperateRecordShow

// loadAttribute enriches the records with their source details (action,
// cloudbrain task and, for admins, the admin log). Enrichment failures are
// logged but do not abort the listing: the base records are still usable.
func (l RewardRecordShowList) loadAttribute(isAdmin bool) {
	// the errors were previously discarded silently; log them so failures
	// are at least visible
	if err := l.loadAction(); err != nil {
		log.Error("RewardRecordShowList loadAction error.%v", err)
	}
	if err := l.loadCloudbrain(); err != nil {
		log.Error("RewardRecordShowList loadCloudbrain error.%v", err)
	}
	if isAdmin {
		if err := l.loadAdminLog(); err != nil {
			log.Error("RewardRecordShowList loadAdminLog error.%v", err)
		}
	}
}

// loadAction resolves the Action display info for records whose source is an
// accomplished task (SourceId holds the action id as a decimal string).
func (l RewardRecordShowList) loadAction() error {
	if len(l) == 0 {
		return nil
	}
	actionIds := make([]int64, 0)
	for _, record := range l {
		if record.SourceType == SourceTypeAccomplishTask.Name() {
			id, _ := strconv.ParseInt(record.SourceId, 10, 64)
			actionIds = append(actionIds, id)
		}
	}
	actions, err := GetActionByIds(actionIds)
	if err != nil {
		return err
	}
	byId := make(map[string]*Action, len(actions))
	for _, act := range actions {
		byId[fmt.Sprint(act.ID)] = act
	}
	for _, record := range l {
		if act, ok := byId[record.SourceId]; ok {
			record.Action = act.ToShow()
		}
	}
	return nil
}

// loadCloudbrain resolves the cloudbrain task display info (including its
// repository and resource spec) for records whose source is a cloudbrain run.
func (l RewardRecordShowList) loadCloudbrain() error {
	if len(l) == 0 {
		return nil
	}
	cloudbrainIds := make([]int64, 0)
	cloudbrainMap := make(map[int64]*RewardOperateRecordShow, 0)
	for _, r := range l {
		if r.SourceType != SourceTypeRunCloudbrainTask.Name() {
			continue
		}
		i, _ := strconv.ParseInt(r.SourceId, 10, 64)
		cloudbrainIds = append(cloudbrainIds, i)
		cloudbrainMap[i] = r
	}
	cloudbrains, err := GetCloudbrainByIds(cloudbrainIds)
	if err != nil {
		return err
	}
	var repoIds []int64
	var taskIds []int64
	for _, task := range cloudbrains {
		repoIds = append(repoIds, task.RepoID)
		taskIds = append(taskIds, task.ID)
	}
	repositoryMap, err := GetRepositoriesMapByIDs(repoIds)
	if err != nil {
		// this error was previously overwritten unchecked by the next call
		return err
	}
	specMap, err := GetResourceSpecMapByCloudbrainIDs(taskIds)
	if err != nil {
		return err
	}
	for _, v := range cloudbrains {
		v.Repo = repositoryMap[v.RepoID]
		v.Spec = specMap[v.ID]
		cloudbrainMap[v.ID].Cloudbrain = v.ToShow()
	}

	return nil
}

// loadAdminLog resolves the admin log display info for records whose source
// is a manual admin operation (SourceId holds the admin log id).
func (l RewardRecordShowList) loadAdminLog() error {
	if len(l) == 0 {
		return nil
	}
	logIds := make([]string, 0)
	byLogId := make(map[string]*RewardOperateRecordShow)
	for _, record := range l {
		if record.SourceType == SourceTypeAdminOperate.Name() {
			logIds = append(logIds, record.SourceId)
			byLogId[record.SourceId] = record
		}
	}
	adminLogs, err := GetRewardAdminLogByLogIds(logIds)
	if err != nil {
		return err
	}
	for _, adminLog := range adminLogs {
		byLogId[adminLog.LogId].AdminLog = adminLog.ToShow()
	}
	return nil
}

// RewardOperateRecord is one reward (or deduction) operation on a user.
// SerialNo uniquely identifies the operation; (SourceType, RequestId,
// OperateType) is used to look up an existing operation for the same request
// (see GetPointOperateRecordBySourceTypeAndRequestId); Status is one of the
// OperateStatus* constants.
type RewardOperateRecord struct {
	ID               int64  `xorm:"pk autoincr"`
	SerialNo         string `xorm:"INDEX NOT NULL"`
	UserId           int64  `xorm:"INDEX NOT NULL"`
	Amount           int64  `xorm:"NOT NULL"`
	LossAmount       int64
	Title            string
	RewardType       string `xorm:"NOT NULL"` // RewardType.Name() value
	SourceType       string `xorm:"NOT NULL"` // SourceType.Name() value
	SourceId         string `xorm:"INDEX NOT NULL"`
	SourceTemplateId string
	RequestId        string `xorm:"INDEX NOT NULL"`
	OperateType      string `xorm:"NOT NULL"` // RewardOperateType.Name() value
	Status           string `xorm:"NOT NULL"`
	Remark           string
	CreatedUnix      timeutil.TimeStamp `xorm:"INDEX created"`
	UpdatedUnix      timeutil.TimeStamp `xorm:"INDEX updated"`
	LastOperateUnix  timeutil.TimeStamp `xorm:"INDEX"`
}

// AdminRewardOperateReq is the request body for a manual admin reward
// operation; Amount is validated into [1, 100000] by the binding.
type AdminRewardOperateReq struct {
	TargetUserId int64             `binding:"Required"`
	OperateType  RewardOperateType `binding:"Required"`
	Amount       int64             `binding:"Required;Range(1,100000)"`
	Remark       string
	RewardType   RewardType
}

// RewardOperateRecordShow is the display projection of a reward record,
// optionally enriched with its source Action/Cloudbrain/AdminLog by the
// load* helpers.
type RewardOperateRecordShow struct {
	SerialNo        string
	Status          string
	OperateType     string
	SourceId        string
	Amount          int64
	LossAmount      int64
	BalanceAfter    int64
	Remark          string
	SourceType      string
	UserName        string
	LastOperateDate timeutil.TimeStamp
	UnitPrice       int64
	SuccessCount    int
	Action          *ActionShow
	Cloudbrain      *CloudbrainShow
	AdminLog        *RewardAdminLogShow
}

// getPointOperateRecord fetches the record matching the non-zero fields of
// tl, returning ErrRecordNotExist when no row matches.
func getPointOperateRecord(tl *RewardOperateRecord) (*RewardOperateRecord, error) {
	found, err := x.Get(tl)
	if err != nil {
		return nil, err
	}
	if !found {
		return nil, ErrRecordNotExist{}
	}
	return tl, nil
}

// GetPointOperateRecordBySourceTypeAndRequestId finds the operation recorded
// for a given source/request/direction triple (idempotency lookup).
func GetPointOperateRecordBySourceTypeAndRequestId(sourceType, requestId, operateType string) (*RewardOperateRecord, error) {
	return getPointOperateRecord(&RewardOperateRecord{
		SourceType:  sourceType,
		RequestId:   requestId,
		OperateType: operateType,
	})
}

// GetPointOperateRecordBySerialNo finds the operation with the given serial
// number.
func GetPointOperateRecordBySerialNo(serialNo string) (*RewardOperateRecord, error) {
	return getPointOperateRecord(&RewardOperateRecord{SerialNo: serialNo})
}

// InsertRewardOperateRecord persists a new reward operate record.
func InsertRewardOperateRecord(record *RewardOperateRecord) (int64, error) {
	return x.Insert(record)
}

// UpdateRewardRecordToFinalStatus moves a still-OPERATING record identified
// by (sourceType, requestId) into newStatus, stamping the operate time.
// Returns the number of rows updated.
func UpdateRewardRecordToFinalStatus(sourceType, requestId, newStatus string) (int64, error) {
	record := &RewardOperateRecord{
		Status:          newStatus,
		LastOperateUnix: timeutil.TimeStampNow(),
	}
	session := x.Cols("status", "last_operate_unix").
		Where("source_type=? and request_id=? and status=?", sourceType, requestId, OperateStatusOperating)
	return session.Update(record)
}

// SumRewardAmountInTaskPeriod totals the reward amounts of a user for the
// given reward type, optionally restricted to a source type and to the
// half-open period [StartTime, EndTime).
func SumRewardAmountInTaskPeriod(rewardType string, sourceType string, userId int64, period *PeriodResult) (int64, error) {
	cond := builder.NewCond().
		And(builder.Eq{"reward_type": rewardType}).
		And(builder.Eq{"user_id": userId})
	if period != nil {
		cond = cond.And(builder.Gte{"created_unix": period.StartTime.Unix()})
		cond = cond.And(builder.Lt{"created_unix": period.EndTime.Unix()})
	}
	if sourceType != "" {
		cond = cond.And(builder.Eq{"source_type": sourceType})
	}
	return x.Where(cond).SumInt(&RewardOperateRecord{}, "amount")
}

// RewardOperateContext carries everything needed to execute one reward
// operation.
type RewardOperateContext struct {
	SourceType       SourceType
	SourceId         string
	SourceTemplateId string
	Title            string
	Remark           string
	Reward           Reward
	TargetUserId     int64
	RequestId        string
	OperateType      RewardOperateType
	RejectPolicy     LimiterRejectPolicy
	// PermittedNegative presumably allows the balance to go negative —
	// TODO(review): confirm at the consuming call sites.
	PermittedNegative bool
	LossAmount        int64
}

// Reward is an amount of a given reward type.
type Reward struct {
	Amount int64
	Type   RewardType
}

// UserRewardOperationRedis mirrors a reward operation for queueing;
// NOTE(review): presumably the redis-serialized form — confirm at call sites.
type UserRewardOperationRedis struct {
	UserId      int64
	Amount      int64
	RewardType  RewardType
	OperateType RewardOperateType
}

// UserRewardOperation pairs a user with a human-readable operation message.
type UserRewardOperation struct {
	UserId int64
	Msg    string
}

// AppendRemark joins remark and appendStr with a semicolon, avoiding a
// leading separator when remark is empty.
func AppendRemark(remark, appendStr string) string {
	joined := remark + Semicolon + appendStr
	return strings.TrimPrefix(joined, Semicolon)
}

// RewardRecordListOpts filters and pages reward record listings; see toCond
// for how each field maps onto the query.
type RewardRecordListOpts struct {
	ListOptions
	UserId      int64
	UserName    string
	OperateType RewardOperateType // OperateTypeNull means no direction filter
	RewardType  RewardType
	SourceType  string
	TaskType    string // matched against source_template_id
	SerialNo    string
	OrderBy     RewardOperateOrderBy
	IsAdmin     bool
	Status      string
}

// toCond normalizes paging/ordering defaults on opts (mutating it) and
// builds the WHERE condition for reward record queries. Records with a
// non-positive amount are always excluded.
func (opts *RewardRecordListOpts) toCond() builder.Cond {
	// apply defaults as a side effect, as the callers rely on it
	if opts.Page <= 0 {
		opts.Page = 1
	}
	if len(opts.OrderBy) == 0 {
		opts.OrderBy = RewardOrderByIDDesc
	}

	cond := builder.NewCond().
		And(builder.Eq{"reward_operate_record.reward_type": opts.RewardType.Name()}).
		And(builder.Gt{"reward_operate_record.amount": 0})
	if opts.UserId > 0 {
		cond = cond.And(builder.Eq{"reward_operate_record.user_id": opts.UserId})
	}
	if opts.OperateType != OperateTypeNull {
		cond = cond.And(builder.Eq{"reward_operate_record.operate_type": opts.OperateType.Name()})
	}
	if opts.SourceType != "" {
		cond = cond.And(builder.Eq{"reward_operate_record.source_type": opts.SourceType})
	}
	if opts.TaskType != "" {
		cond = cond.And(builder.Eq{"reward_operate_record.source_template_id": opts.TaskType})
	}
	if opts.SerialNo != "" {
		cond = cond.And(builder.Like{"reward_operate_record.serial_no", opts.SerialNo})
	}
	if opts.Status != "" {
		// Like (not Eq) kept from the original implementation
		cond = cond.And(builder.Like{"reward_operate_record.status", opts.Status})
	}
	return cond
}

// TestTT appears to be leftover debugging/test scaffolding: nothing in this
// file references it. NOTE(review): consider removing it in a follow-up.
type TestTT struct {
	SerialNo string
	UserId   int64
	Amount   int64
	UserName string
}

// GetRewardRecordShowList pages reward records for the regular user view and
// returns the page plus the total match count.
func GetRewardRecordShowList(opts *RewardRecordListOpts) (RewardRecordShowList, int64, error) {
	cond := opts.toCond()
	total, err := x.Where(cond).Count(&RewardOperateRecord{})
	if err != nil {
		return nil, 0, err
	}
	records := make([]*RewardOperateRecordShow, 0)
	err = x.Table("reward_operate_record").
		Cols("reward_operate_record.source_id", "reward_operate_record.serial_no",
			"reward_operate_record.status", "reward_operate_record.operate_type", "reward_operate_record.amount",
			"reward_operate_record.loss_amount", "reward_operate_record.remark", "reward_operate_record.source_type",
			"reward_operate_record.last_operate_unix as last_operate_date").
		Where(cond).
		Limit(opts.PageSize, (opts.Page-1)*opts.PageSize).
		OrderBy(string(opts.OrderBy)).
		Find(&records)
	if err != nil {
		return nil, 0, err
	}
	// non-admin view: no admin-log enrichment
	RewardRecordShowList(records).loadAttribute(false)
	return records, total, nil
}

// GetAdminRewardRecordShowList pages reward records for the admin console.
// Depending on the direction it joins different side tables:
//   - INCREASE: joins user and point_account_log to expose the user name and
//     the balance after the grant;
//   - DECREASE: joins user and reward_periodic_task to expose the unit price
//     and billing success count.
// Any other OperateType leaves the page empty (only the count is computed).
func GetAdminRewardRecordShowList(opts *RewardRecordListOpts) (RewardRecordShowList, int64, error) {
	cond := opts.toCond()
	count, err := x.Where(cond).Count(&RewardOperateRecord{})
	if err != nil {
		return nil, 0, err
	}
	r := make([]*RewardOperateRecordShow, 0)
	switch opts.OperateType {
	case OperateTypeIncrease:
		err = x.Table("reward_operate_record").Cols("reward_operate_record.source_id", "reward_operate_record.serial_no",
			"reward_operate_record.status", "reward_operate_record.operate_type", "reward_operate_record.amount",
			"reward_operate_record.loss_amount", "reward_operate_record.remark", "reward_operate_record.source_type",
			"reward_operate_record.last_operate_unix as last_operate_date", "public.user.name as user_name",
			"point_account_log.balance_after").
			Join("LEFT", "public.user", "reward_operate_record.user_id = public.user.id").
			Join("LEFT", "point_account_log", " reward_operate_record.serial_no = point_account_log.source_id").
			Where(cond).Limit(opts.PageSize, (opts.Page-1)*opts.PageSize).OrderBy(string(opts.OrderBy)).Find(&r)
	case OperateTypeDecrease:
		err = x.Table("reward_operate_record").Cols("reward_operate_record.source_id", "reward_operate_record.serial_no",
			"reward_operate_record.status", "reward_operate_record.operate_type", "reward_operate_record.amount",
			"reward_operate_record.loss_amount", "reward_operate_record.remark", "reward_operate_record.source_type",
			"reward_operate_record.last_operate_unix as last_operate_date", "public.user.name as user_name",
			"reward_periodic_task.amount as unit_price", "reward_periodic_task.success_count").
			Join("LEFT", "public.user", "reward_operate_record.user_id = public.user.id").
			Join("LEFT", "reward_periodic_task", "reward_operate_record.serial_no = reward_periodic_task.operate_serial_no").
			Where(cond).Limit(opts.PageSize, (opts.Page-1)*opts.PageSize).OrderBy(string(opts.OrderBy)).Find(&r)
	}

	if err != nil {
		return nil, 0, err
	}
	// admin view: also resolve the admin log details
	RewardRecordShowList(r).loadAttribute(true)
	return r, count, nil
}

// IsWechatOpenIdRewarded reports whether a bind-wechat reward was already
// granted for the given openId. On query failure it returns true so the
// caller will not hand out a duplicate reward.
func IsWechatOpenIdRewarded(wechatOpenId string) bool {
	actions := make([]Action, 0)
	if err := x.Where(" op_type = ? and content = ?", ActionBindWechat, wechatOpenId).Find(&actions); err != nil {
		log.Error("IsWechatOpenIdRewarded find actions err.%v", err)
		return true
	}
	if len(actions) == 0 {
		return false
	}
	actionIds := make([]int64, 0, len(actions))
	for _, act := range actions {
		actionIds = append(actionIds, act.ID)
	}
	count, _ := x.Where(builder.Eq{"source_type": SourceTypeAccomplishTask}.And(builder.In("source_id", actionIds))).Count(&RewardOperateRecord{})
	return count > 0
}

+ 115
- 0
models/reward_periodic_task.go View File

@@ -0,0 +1,115 @@
package models

import (
"code.gitea.io/gitea/modules/timeutil"
"time"
)

// PeriodicTaskStatus enumerates the lifecycle of a periodic reward task.
// Note: the constants below are untyped ints, not PeriodicTaskStatus values.
type PeriodicTaskStatus int

const (
	PeriodicTaskStatusRunning = iota + 1 // 1
	PeriodicTaskStatusFinished // 2
)

// PeriodType encodes a billing period scheme.
type PeriodType string

const (
	// "30MF1HC" — presumably 30 minutes free, then charged per hour;
	// TODO(review): confirm against the billing logic that consumes it.
	PeriodType30MinutesFree1HourCost PeriodType = "30MF1HC"
)

// Name returns the string code of a known PeriodType, "" otherwise.
func (r PeriodType) Name() string {
	if r == PeriodType30MinutesFree1HourCost {
		return string(r)
	}
	return ""
}

// RewardPeriodicTask is a recurring charge attached to a reward operate
// record via OperateSerialNo: each successful tick adds Amount onto the
// record and advances NextExecuteTime (see IncrRewardTaskSuccessCount).
type RewardPeriodicTask struct {
	ID              int64  `xorm:"pk autoincr"`
	OperateSerialNo string `xorm:"INDEX NOT NULL"`
	DelaySeconds    int64
	IntervalSeconds int64
	Amount          int64              `xorm:"NOT NULL"` // amount charged per tick
	NextExecuteTime timeutil.TimeStamp `xorm:"INDEX NOT NULL"`
	SuccessCount    int                `xorm:"NOT NULL default 0"`
	Status          int                `xorm:"NOT NULL"` // PeriodicTaskStatusRunning/Finished
	CreatedUnix     timeutil.TimeStamp `xorm:"INDEX created"`
	FinishedUnix    timeutil.TimeStamp `xorm:"INDEX"`
	UpdatedUnix     timeutil.TimeStamp `xorm:"INDEX updated"`
}

// StartPeriodicTaskOpts parameterizes the creation of a periodic reward task.
type StartPeriodicTaskOpts struct {
	SourceType   SourceType
	SourceId     string
	Remark       string
	Title        string
	TargetUserId int64
	RequestId    string
	OperateType  RewardOperateType
	Delay        time.Duration
	Interval     time.Duration
	UnitAmount   int
	RewardType   RewardType
	StartTime    time.Time
}

// InsertPeriodicTask persists a new periodic reward task.
func InsertPeriodicTask(task *RewardPeriodicTask) (int64, error) {
	return x.Insert(task)
}

// GetRunningRewardTask returns all running periodic reward tasks whose next
// execution time is due at or before now.
func GetRunningRewardTask(now time.Time) ([]RewardPeriodicTask, error) {
	tasks := make([]RewardPeriodicTask, 0)
	if err := x.Where("next_execute_time <= ? and status = ?", now.Unix(), PeriodicTaskStatusRunning).Find(&tasks); err != nil {
		return nil, err
	}
	return tasks, nil
}

// IncrRewardTaskSuccessCount records count successful ticks of a periodic
// task: it bumps the task's success counter, schedules the next execution,
// and accumulates the per-tick amount onto the linked operate record — all
// atomically.
func IncrRewardTaskSuccessCount(t RewardPeriodicTask, count int64, nextTime timeutil.TimeStamp) error {
	sess := x.NewSession()
	defer sess.Close()
	// An explicit Begin is required: without it the session runs each
	// statement in auto-commit mode and Rollback/Commit are no-ops, so the
	// two updates below could be applied partially.
	if err := sess.Begin(); err != nil {
		return err
	}
	if _, err := sess.Exec("update reward_periodic_task set success_count = success_count + ? , next_execute_time = ?, updated_unix = ? where id = ?", count, nextTime, timeutil.TimeStampNow(), t.ID); err != nil {
		sess.Rollback()
		return err
	}
	if _, err := sess.Exec("update reward_operate_record set amount = amount + ? ,updated_unix = ? ,last_operate_unix = ? where serial_no = ?", t.Amount, timeutil.TimeStampNow(), timeutil.TimeStampNow(), t.OperateSerialNo); err != nil {
		sess.Rollback()
		return err
	}
	// propagate commit failures instead of dropping them
	return sess.Commit()
}

// GetPeriodicTaskBySourceIdAndType finds the periodic task linked (through
// its operate record) to the given source and direction. Returns
// ErrRecordNotExist when no such task exists.
func GetPeriodicTaskBySourceIdAndType(sourceType SourceType, sourceId string, operateType RewardOperateType) (*RewardPeriodicTask, error) {
	r := RewardPeriodicTask{}
	has, err := x.SQL("select rpt.* from reward_periodic_task rpt "+
		"inner join reward_operate_record ror on rpt.operate_serial_no = ror.serial_no"+
		" where ror.source_type = ? and ror.source_id = ? and ror.operate_type = ? ", sourceType.Name(), sourceId, operateType.Name()).Get(&r)
	if err != nil {
		return nil, err
	}
	if !has {
		// previously the `has` flag was ignored and a zero-value struct was
		// returned; surface the miss like the other getters in this package
		return nil, ErrRecordNotExist{}
	}
	return &r, nil
}

// StopPeriodicTask marks a running periodic task as finished and settles its
// still-operating operate record as succeeded, atomically. Rows already in a
// final state are left untouched by the status guards.
func StopPeriodicTask(taskId int64, operateSerialNo string, stopTime time.Time) error {
	sess := x.NewSession()
	defer sess.Close()
	// An explicit Begin is required: without it the session runs in
	// auto-commit mode and Rollback/Commit are no-ops.
	if err := sess.Begin(); err != nil {
		return err
	}
	if _, err := sess.Where("id = ? and status = ?", taskId, PeriodicTaskStatusRunning).
		Update(&RewardPeriodicTask{Status: PeriodicTaskStatusFinished, FinishedUnix: timeutil.TimeStamp(stopTime.Unix())}); err != nil {
		sess.Rollback()
		return err
	}
	if _, err := sess.Where("serial_no = ? and status = ?", operateSerialNo, OperateStatusOperating).
		Update(&RewardOperateRecord{Status: OperateStatusSucceeded}); err != nil {
		sess.Rollback()
		return err
	}
	// propagate commit failures instead of dropping them
	return sess.Commit()
}

+ 44
- 0
models/task_accomplish_log.go View File

@@ -0,0 +1,44 @@
package models

import (
"code.gitea.io/gitea/modules/timeutil"
"time"
)

// TaskAccomplishLog records one completion of a point task by a user;
// ActionId links back to the triggering Action when there is one.
type TaskAccomplishLog struct {
	ID          int64  `xorm:"pk autoincr"`
	ConfigId    int64  `xorm:"NOT NULL"` // TaskConfig.ID in effect at completion time
	TaskCode    string `xorm:"NOT NULL"`
	UserId      int64  `xorm:"INDEX NOT NULL"`
	ActionId    int64
	CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
}

// PeriodResult is a resolved task period: the half-open window
// [StartTime, EndTime). LeftTime is presumably the remaining duration in the
// period — TODO(review): confirm at the call sites that populate it.
type PeriodResult struct {
	StartTime time.Time
	EndTime   time.Time
	LeftTime  time.Duration
}

// getTaskAccomplishLog fetches the log matching the non-zero fields of tl,
// returning ErrRecordNotExist when no row matches.
func getTaskAccomplishLog(tl *TaskAccomplishLog) (*TaskAccomplishLog, error) {
	found, err := x.Get(tl)
	if err != nil {
		return nil, err
	}
	if !found {
		return nil, ErrRecordNotExist{}
	}
	return tl, nil
}

// CountTaskAccomplishLogInTaskPeriod counts how many times a user completed
// a task. A nil period means "over all time"; otherwise only completions in
// the half-open window [StartTime, EndTime) are counted.
func CountTaskAccomplishLogInTaskPeriod(taskCode string, userId int64, period *PeriodResult) (int64, error) {
	if period == nil {
		return x.Where("task_code = ? and user_id = ?", taskCode, userId).Count(&TaskAccomplishLog{})
	}
	return x.Where("task_code = ? and user_id = ? and created_unix >= ? and created_unix < ? ", taskCode, userId, period.StartTime.Unix(), period.EndTime.Unix()).Count(&TaskAccomplishLog{})
}

// InsertTaskAccomplishLog persists a new task accomplishment log entry.
func InsertTaskAccomplishLog(entry *TaskAccomplishLog) (int64, error) {
	return x.Insert(entry)
}

+ 374
- 0
models/task_config.go View File

@@ -0,0 +1,374 @@
package models

import (
"code.gitea.io/gitea/modules/timeutil"
"xorm.io/builder"
)

// Task limit refresh periods: a task limit either never resets or resets
// daily.
const (
	PeriodNotCycle = "NOT_CYCLE"
	PeriodDaily    = "DAILY"
)

// TaskType is the code of a point-earning task.
type TaskType string

const (
	TaskCreatePublicRepo     TaskType = "CreatePublicRepo"
	TaskCreateIssue          TaskType = "CreateIssue"
	TaskCreatePullRequest    TaskType = "CreatePullRequest"
	TaskCommentIssue         TaskType = "CommentIssue"
	TaskUploadAttachment     TaskType = "UploadAttachment"
	TaskCreateNewModelTask   TaskType = "CreateNewModelTask"
	TaskBindWechat           TaskType = "BindWechat"
	TaskCreateCloudbrainTask TaskType = "CreateCloudbrainTask"
	TaskDatasetRecommended   TaskType = "DatasetRecommended"
	TaskCreateImage          TaskType = "CreateImage"
	TaskImageRecommend       TaskType = "ImageRecommend"
	TaskChangeUserAvatar     TaskType = "ChangeUserAvatar"
	TaskPushCommits          TaskType = "PushCommits"
)

// GetTaskTypeFromAction maps a user action onto the point task it can
// accomplish; the empty TaskType means the action is not bound to any task.
func GetTaskTypeFromAction(a ActionType) TaskType {
	mapping := map[ActionType]TaskType{
		// all cloudbrain task creations count as one task
		ActionCreateDebugGPUTask:        TaskCreateCloudbrainTask,
		ActionCreateDebugNPUTask:        TaskCreateCloudbrainTask,
		ActionCreateTrainTask:           TaskCreateCloudbrainTask,
		ActionCreateInferenceTask:       TaskCreateCloudbrainTask,
		ActionCreateBenchMarkTask:       TaskCreateCloudbrainTask,
		ActionCreateGPUTrainTask:        TaskCreateCloudbrainTask,
		ActionCreateGrampusNPUTrainTask: TaskCreateCloudbrainTask,
		ActionCreateGrampusGPUTrainTask: TaskCreateCloudbrainTask,
		ActionCreateRepo:                TaskCreatePublicRepo,
		ActionCreatePullRequest:         TaskCreatePullRequest,
		ActionCommentIssue:              TaskCommentIssue,
		ActionUploadAttachment:          TaskUploadAttachment,
		ActionCreateNewModelTask:        TaskCreateNewModelTask,
		ActionBindWechat:                TaskBindWechat,
		ActionDatasetRecommended:        TaskDatasetRecommended,
		ActionImageRecommend:            TaskImageRecommend,
		ActionCreateImage:               TaskCreateImage,
		ActionChangeUserAvatar:          TaskChangeUserAvatar,
		// any repo-content push-like action counts as a commit push
		ActionCommitRepo:   TaskPushCommits,
		ActionDeleteBranch: TaskPushCommits,
		ActionPushTag:      TaskPushCommits,
		ActionDeleteTag:    TaskPushCommits,
		ActionCreateIssue:  TaskCreateIssue,
	}
	// unmapped actions yield the zero value ""
	return mapping[a]
}

// TaskConfig: only add and delete are allowed, edit is not allowed —
// to edit the config for a task code, delete the row first and add a new one
// (EditTaskConfig does exactly that).
type TaskConfig struct {
	ID          int64  `xorm:"pk autoincr"`
	TaskCode    string `xorm:"NOT NULL"`
	Title       string
	AwardType   string `xorm:"NOT NULL"`
	AwardAmount int64  `xorm:"NOT NULL"`
	CreatorId   int64  `xorm:"NOT NULL"`
	CreatorName string
	CreatedUnix timeutil.TimeStamp `xorm:"created"`
	DeletedAt   timeutil.TimeStamp `xorm:"deleted"` // xorm soft-delete marker
	DeleterId   int64
	DeleterName string
}

// TaskConfigWithLimit is a task config together with all its limiter rules,
// as edited in the admin UI.
type TaskConfigWithLimit struct {
	ID          int64
	TaskCode    string
	Title       string
	AwardType   string
	AwardAmount int64
	Creator     string
	IsDeleted   bool
	CreatedUnix timeutil.TimeStamp
	DeleteAt    timeutil.TimeStamp
	Limiters    []*LimitConfigVO
}

// TaskConfigWithLimitResponse is one page of task configs (flattened to one
// limiter per row).
type TaskConfigWithLimitResponse struct {
	Records  []*TaskConfigWithSingleLimit
	Total    int64
	PageSize int
	Page     int
}

// TaskConfigWithSingleLimit is a task config flattened with exactly one of
// its limiter rules.
type TaskConfigWithSingleLimit struct {
	ID          int64
	TaskCode    string
	AwardType   string
	AwardAmount int64
	Creator     string
	IsDeleted   bool
	CreatedUnix timeutil.TimeStamp
	DeleteAt    timeutil.TimeStamp
	RefreshRate string
	LimitNum    int64
}

// TaskAndLimiterConfig is the join row of a task config with a limiter
// config (LEFT JOIN, so LimitConfig may be zero-valued).
type TaskAndLimiterConfig struct {
	TaskConfig  TaskConfig  `xorm:"extends"`
	LimitConfig LimitConfig `xorm:"extends"`
}

// PointRule is the full point ruleset shown to users.
type PointRule struct {
	UserDailyLimit int64
	TaskRules      []TaskRule
}

// TaskRule is one task's award and limit description.
type TaskRule struct {
	TaskCode    string
	AwardType   string
	AwardAmount int64
	RefreshRate string
	LimitNum    int64
}

// TableName anchors the joined query of TaskAndLimiterConfig on task_config.
func (TaskAndLimiterConfig) TableName() string {
	return "task_config"
}

// BatchLimitConfigVO is a batch of task configs submitted together.
type BatchLimitConfigVO struct {
	ConfigList []TaskConfigWithLimit
}

// getTaskConfig fetches the config matching the non-zero fields of t,
// returning ErrRecordNotExist when no row matches.
func getTaskConfig(t *TaskConfig) (*TaskConfig, error) {
	found, err := x.Get(t)
	if err != nil {
		return nil, err
	}
	if !found {
		return nil, ErrRecordNotExist{}
	}
	return t, nil
}

// GetTaskConfigByTaskCode returns the (live) config for the given task code.
func GetTaskConfigByTaskCode(taskCode string) (*TaskConfig, error) {
	return getTaskConfig(&TaskConfig{TaskCode: taskCode})
}

// GetTaskConfigByID returns the config with the given id.
func GetTaskConfigByID(id int64) (*TaskConfig, error) {
	return getTaskConfig(&TaskConfig{ID: id})
}

// GetTaskConfigList returns all live task configs, or ErrRecordNotExist when
// none are configured.
func GetTaskConfigList() ([]*TaskConfig, error) {
	configs := make([]*TaskConfig, 0)
	if err := x.Find(&configs); err != nil {
		return nil, err
	}
	if len(configs) == 0 {
		return nil, ErrRecordNotExist{}
	}
	return configs, nil
}

// GetTaskConfigOpts filters the admin task-config listing.
type GetTaskConfigOpts struct {
	ListOptions
	Status   int //1 normal 2 deleted
	TaskType string
}

// GetTaskConfigPageWithDeleted pages task configs (bypassing the soft-delete
// filter) joined with their limiter configs.
// opt.Status: 1 = only live rows, 2 = only soft-deleted rows, other = all.
// Returns ErrRecordNotExist for an empty page.
func GetTaskConfigPageWithDeleted(opt GetTaskConfigOpts) ([]*TaskAndLimiterConfig, int64, error) {
	if opt.Page <= 0 {
		opt.Page = 1
	}
	cond := builder.NewCond()
	if opt.TaskType != "" {
		cond = cond.And(builder.Eq{"task_code": opt.TaskType})
	}

	if opt.Status == 1 {
		// live rows: deleted_at NULL or the zero timestamp
		subCond := builder.NewCond()
		subCond = subCond.Or(builder.IsNull{"task_config.deleted_at"})
		subCond = subCond.Or(builder.Eq{"task_config.deleted_at": 0})
		cond = cond.And(subCond)
	} else if opt.Status == 2 {
		cond = cond.And(builder.Gt{"task_config.deleted_at": 0})
	}
	count, err := x.Unscoped().Where(cond).Count(&TaskConfig{})
	if err != nil {
		return nil, 0, err
	}
	r := make([]*TaskAndLimiterConfig, 0)
	err = x.Join("LEFT", "limit_config", "task_config.id = limit_config.related_id").
		Unscoped().Where(cond).Limit(opt.PageSize, (opt.Page-1)*opt.PageSize).
		OrderBy("task_config.deleted_at desc,task_config.id desc").Find(&r)
	if err != nil {
		// previously unchecked, which made query failures look like an
		// empty result
		return nil, 0, err
	}
	if len(r) == 0 {
		return nil, 0, ErrRecordNotExist{}
	}
	return r, count, nil
}

// EditTaskConfig replaces the config of a task. Because TaskConfig rows are
// immutable by design (see the comment on TaskConfig), an edit soft-deletes
// the old config and its limiters (stamping the deleter on both) and inserts
// fresh rows — all inside one transaction.
func EditTaskConfig(config TaskConfigWithLimit, doer *User) error {
	sess := x.NewSession()
	defer sess.Close()
	// An explicit Begin is required: without it the statements below run in
	// auto-commit mode and Rollback cannot undo partial edits.
	if err := sess.Begin(); err != nil {
		return err
	}

	// soft-delete the old task config
	p := &TaskConfig{ID: config.ID}
	if _, err := sess.Delete(p); err != nil {
		sess.Rollback()
		return err
	}
	// stamp the deleter on the soft-deleted row (error was ignored before)
	p.DeleterId = doer.ID
	p.DeleterName = doer.Name
	if _, err := sess.Where("id = ?", config.ID).Unscoped().Update(p); err != nil {
		sess.Rollback()
		return err
	}

	// insert the replacement config
	t := &TaskConfig{
		TaskCode:    config.TaskCode,
		Title:       config.Title,
		AwardType:   config.AwardType,
		AwardAmount: config.AwardAmount,
		CreatorId:   doer.ID,
		CreatorName: doer.Name,
	}
	if _, err := sess.InsertOne(t); err != nil {
		sess.Rollback()
		return err
	}

	// soft-delete the old limiter configs
	lp := &LimitConfig{RelatedId: config.ID}
	if _, err := sess.Delete(lp); err != nil {
		sess.Rollback()
		return err
	}
	// stamp the deleter on the soft-deleted limiter rows
	lp.DeleterName = doer.Name
	lp.DeleterId = doer.ID
	if _, err := sess.Where("related_id = ?", config.ID).Unscoped().Update(lp); err != nil {
		sess.Rollback()
		return err
	}

	// insert the replacement limiter configs, linked to the new config row
	for _, v := range config.Limiters {
		l := &LimitConfig{
			Title:       v.Title,
			RefreshRate: v.RefreshRate,
			Scope:       v.Scope,
			LimitNum:    v.LimitNum,
			LimitCode:   config.TaskCode,
			LimitType:   LimitTypeTask.Name(),
			CreatorId:   doer.ID,
			CreatorName: doer.Name,
			RelatedId:   t.ID,
		}
		if _, err := sess.Insert(l); err != nil {
			sess.Rollback()
			return err
		}
	}
	// propagate commit failures instead of dropping them
	return sess.Commit()
}

// NewTaskConfig inserts a task config together with its limiter configs in a
// single transaction.
func NewTaskConfig(config TaskConfigWithLimit, doer *User) error {
	sess := x.NewSession()
	defer sess.Close()
	// An explicit Begin is required: without it the session runs in
	// auto-commit mode and Rollback is a no-op.
	if err := sess.Begin(); err != nil {
		return err
	}

	// insert the new config
	t := &TaskConfig{
		TaskCode:    config.TaskCode,
		Title:       config.Title,
		AwardType:   config.AwardType,
		AwardAmount: config.AwardAmount,
		CreatorId:   doer.ID,
		CreatorName: doer.Name,
	}
	if _, err := sess.InsertOne(t); err != nil {
		sess.Rollback()
		return err
	}

	// insert the limiter configs, linked to the new config row
	for _, v := range config.Limiters {
		l := &LimitConfig{
			RelatedId:   t.ID,
			Title:       v.Title,
			RefreshRate: v.RefreshRate,
			Scope:       v.Scope,
			LimitNum:    v.LimitNum,
			LimitCode:   config.TaskCode,
			LimitType:   LimitTypeTask.Name(),
			CreatorId:   doer.ID,
			CreatorName: doer.Name,
		}
		if _, err := sess.Insert(l); err != nil {
			sess.Rollback()
			return err
		}
	}
	// propagate commit failures instead of dropping them
	return sess.Commit()
}

// DelTaskConfig soft-deletes a task config and its limiter configs, stamping
// the deleter on both, all in one transaction.
func DelTaskConfig(id int64, doer *User) error {
	sess := x.NewSession()
	defer sess.Close()
	// An explicit Begin is required: without it the session runs in
	// auto-commit mode and Rollback is a no-op.
	if err := sess.Begin(); err != nil {
		return err
	}

	// soft-delete the task config
	p := &TaskConfig{ID: id}
	if _, err := sess.Delete(p); err != nil {
		sess.Rollback()
		return err
	}
	// stamp the deleter on the soft-deleted row (error was ignored before)
	p.DeleterId = doer.ID
	p.DeleterName = doer.Name
	if _, err := sess.Where("id = ?", id).Unscoped().Update(p); err != nil {
		sess.Rollback()
		return err
	}

	// soft-delete the limiter configs
	lp := &LimitConfig{RelatedId: id}
	if _, err := sess.Delete(lp); err != nil {
		sess.Rollback()
		return err
	}
	// stamp the deleter on the soft-deleted limiter rows
	lp.DeleterName = doer.Name
	lp.DeleterId = doer.ID
	if _, err := sess.Where("related_id = ?", id).Unscoped().Update(lp); err != nil {
		sess.Rollback()
		return err
	}
	// propagate commit failures instead of dropping them
	return sess.Commit()
}

+ 4
- 0
models/user.go View File

@@ -188,6 +188,10 @@ type User struct {
PhoneNumber string `xorm:"VARCHAR(255)"`
}

// UserShow is a minimal user projection exposing only the name.
type UserShow struct {
	Name string
}

// SearchOrganizationsOptions options to filter organizations
type SearchOrganizationsOptions struct {
ListOptions


+ 72
- 5
models/user_business_analysis.go View File

@@ -106,7 +106,8 @@ type UserBusinessAnalysisAll struct {
CollectedImage int `xorm:"NOT NULL DEFAULT 0"`
RecommendImage int `xorm:"NOT NULL DEFAULT 0"`

Phone string `xorm:"NULL"`
Phone string `xorm:"NULL"`
InvitationUserNum int `xorm:"NOT NULL DEFAULT 0"`
}

type UserBusinessAnalysis struct {
@@ -193,7 +194,8 @@ type UserBusinessAnalysis struct {
CollectedImage int `xorm:"NOT NULL DEFAULT 0"`
RecommendImage int `xorm:"NOT NULL DEFAULT 0"`

Phone string `xorm:"NULL"`
Phone string `xorm:"NULL"`
InvitationUserNum int `xorm:"NOT NULL DEFAULT 0"`
}

type UserBusinessAnalysisQueryOptions struct {
@@ -354,6 +356,33 @@ func QueryRankList(key string, tableName string, limit int) ([]*UserBusinessAnal
return userBusinessAnalysisAllList, int64(len(userBusinessAnalysisAllList))
}

// QueryUserInvitationDataByTableName pages user-analysis rows with at least
// invitationNum invited users, optionally filtered by a case-insensitive
// user-name substring, ordered by invitation count desc (id asc as a
// tiebreak). Returns the page and the total match count; failures are logged
// and reported as (nil, 0), matching the sibling query helpers.
func QueryUserInvitationDataByTableName(start int, pageSize int, tableName string, queryObj interface{}, userName string, invitationNum int) ([]*UserBusinessAnalysisAll, int64) {
	statictisSess := xStatistic.NewSession()
	defer statictisSess.Close()
	var cond = builder.NewCond()
	if len(userName) > 0 {
		cond = cond.And(
			builder.Like{"lower(name)", strings.ToLower(userName)},
		)
	}
	cond = cond.And(
		builder.Gte{"invitation_user_num": invitationNum},
	)

	allCount, err := statictisSess.Where(cond).Count(queryObj)
	if err != nil {
		log.Info("query error." + err.Error())
		return nil, 0
	}
	log.Info("query return total:" + fmt.Sprint(allCount))
	userBusinessAnalysisAllList := make([]*UserBusinessAnalysisAll, 0)
	if err := statictisSess.Table(tableName).Where(cond).OrderBy("invitation_user_num desc,id asc").Limit(pageSize, start).
		Find(&userBusinessAnalysisAllList); err != nil {
		// previously swallowed silently; log it like the count path above
		log.Info("query error." + err.Error())
		return nil, 0
	}
	return userBusinessAnalysisAllList, allCount
}

func QueryUserStaticDataByTableName(start int, pageSize int, tableName string, queryObj interface{}, userName string) ([]*UserBusinessAnalysisAll, int64) {
statictisSess := xStatistic.NewSession()
defer statictisSess.Close()
@@ -363,6 +392,7 @@ func QueryUserStaticDataByTableName(start int, pageSize int, tableName string, q
builder.Like{"lower(name)", strings.ToLower(userName)},
)
}

allCount, err := statictisSess.Where(cond).Count(queryObj)
if err != nil {
log.Info("query error." + err.Error())
@@ -752,6 +782,8 @@ func refreshUserStaticTable(wikiCountMap map[string]int, tableName string, pageS
CollectImage, CollectedImage := queryImageStars(start_unix, end_unix)
RecommendImage := queryRecommedImage(start_unix, end_unix)

InvitationMap := queryUserInvitationCount(start_unix, end_unix)

DataDate := currentTimeNow.Format("2006-01-02") + " 00:01"

cond := "type != 1 and is_active=true"
@@ -825,7 +857,7 @@ func refreshUserStaticTable(wikiCountMap map[string]int, tableName string, pageS
dateRecordAll.CollectImage = getMapValue(dateRecordAll.ID, CollectImage)
dateRecordAll.CollectedImage = getMapValue(dateRecordAll.ID, CollectedImage)
dateRecordAll.RecommendImage = getMapValue(dateRecordAll.ID, RecommendImage)
dateRecordAll.InvitationUserNum = getMapValue(dateRecordAll.ID, InvitationMap)
dateRecordAll.UserIndexPrimitive = getUserIndexFromAnalysisAll(dateRecordAll, ParaWeight)
userIndexMap[dateRecordAll.ID] = dateRecordAll.UserIndexPrimitive
if maxUserIndex < dateRecordAll.UserIndexPrimitive {
@@ -888,7 +920,7 @@ func insertTable(dateRecords []UserBusinessAnalysisAll, tableName string, static

insertBatchSql := "INSERT INTO public." + tableName +
"(id, count_date, code_merge_count, commit_count, issue_count, comment_count, focus_repo_count, star_repo_count, watched_count, gitea_age_month, commit_code_size, commit_dataset_size, " +
"commit_model_count, solve_issue_count, encyclopedias_count, regist_date, create_repo_count, login_count, open_i_index, email, name, data_date,cloud_brain_task_num,gpu_debug_job,npu_debug_job,gpu_train_job,npu_train_job,npu_inference_job,gpu_bench_mark_job,cloud_brain_run_time,commit_dataset_num,user_index,user_location,focus_other_user,collect_dataset,collected_dataset,recommend_dataset,collect_image,collected_image,recommend_image,user_index_primitive,phone) " +
"commit_model_count, solve_issue_count, encyclopedias_count, regist_date, create_repo_count, login_count, open_i_index, email, name, data_date,cloud_brain_task_num,gpu_debug_job,npu_debug_job,gpu_train_job,npu_train_job,npu_inference_job,gpu_bench_mark_job,cloud_brain_run_time,commit_dataset_num,user_index,user_location,focus_other_user,collect_dataset,collected_dataset,recommend_dataset,collect_image,collected_image,recommend_image,user_index_primitive,phone,invitation_user_num) " +
"VALUES"

for i, record := range dateRecords {
@@ -897,7 +929,7 @@ func insertTable(dateRecords []UserBusinessAnalysisAll, tableName string, static
", " + fmt.Sprint(record.WatchedCount) + ", " + fmt.Sprint(record.GiteaAgeMonth) + ", " + fmt.Sprint(record.CommitCodeSize) + ", " + fmt.Sprint(record.CommitDatasetSize) +
", " + fmt.Sprint(record.CommitModelCount) + ", " + fmt.Sprint(record.SolveIssueCount) + ", " + fmt.Sprint(record.EncyclopediasCount) + ", " + fmt.Sprint(record.RegistDate) +
", " + fmt.Sprint(record.CreateRepoCount) + ", " + fmt.Sprint(record.LoginCount) + ", " + fmt.Sprint(record.OpenIIndex) + ", '" + record.Email + "', '" + record.Name + "', '" + record.DataDate + "'," + fmt.Sprint(record.CloudBrainTaskNum) + "," + fmt.Sprint(record.GpuDebugJob) + "," + fmt.Sprint(record.NpuDebugJob) + "," + fmt.Sprint(record.GpuTrainJob) + "," + fmt.Sprint(record.NpuTrainJob) + "," + fmt.Sprint(record.NpuInferenceJob) + "," + fmt.Sprint(record.GpuBenchMarkJob) + "," + fmt.Sprint(record.CloudBrainRunTime) + "," + fmt.Sprint(record.CommitDatasetNum) + "," + fmt.Sprint(record.UserIndex) + ",'" + record.UserLocation + "'," +
fmt.Sprint(record.FocusOtherUser) + "," + fmt.Sprint(record.CollectDataset) + "," + fmt.Sprint(record.CollectedDataset) + "," + fmt.Sprint(record.RecommendDataset) + "," + fmt.Sprint(record.CollectImage) + "," + fmt.Sprint(record.CollectedImage) + "," + fmt.Sprint(record.RecommendImage) + "," + fmt.Sprint(record.UserIndexPrimitive) + ",'" + record.Phone + "')"
fmt.Sprint(record.FocusOtherUser) + "," + fmt.Sprint(record.CollectDataset) + "," + fmt.Sprint(record.CollectedDataset) + "," + fmt.Sprint(record.RecommendDataset) + "," + fmt.Sprint(record.CollectImage) + "," + fmt.Sprint(record.CollectedImage) + "," + fmt.Sprint(record.RecommendImage) + "," + fmt.Sprint(record.UserIndexPrimitive) + ",'" + record.Phone + "'" + "," + fmt.Sprint(record.InvitationUserNum) + ")"
if i < (len(dateRecords) - 1) {
insertBatchSql += ","
}
@@ -2173,6 +2205,41 @@ func queryCloudBrainTask(start_unix int64, end_unix int64) (map[int64]int, map[s

return resultMap, resultItemMap
}

// queryUserInvitationCount returns a map from inviter user id (src_user_id)
// to the number of invitation records that inviter created within
// [start_unix, end_unix], reading the statistics DB's invitation table in
// pages of PAGE_SIZE rows.
func queryUserInvitationCount(start_unix int64, end_unix int64) map[int64]int {
	statictisSess := xStatistic.NewSession()
	defer statictisSess.Close()

	resultMap := make(map[int64]int)
	// Only int64 timestamps are formatted into the condition text; no
	// user-controlled input is involved here.
	cond := "created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix)
	count, err := statictisSess.Where(cond).Count(new(Invitation))
	if err != nil {
		log.Info("query queryUserInvitationCount error. return.")
		return resultMap
	}
	var indexTotal int64
	indexTotal = 0
	for {
		// The session accumulates the statement here and executes it on the
		// following Find call (xorm statement-building pattern).
		statictisSess.Select("id,src_user_id,user_id").Table("invitation").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal))
		invitationList := make([]*Invitation, 0)
		// NOTE(review): the Find error is ignored; a failing page simply
		// contributes nothing and the loop still advances by PAGE_SIZE.
		statictisSess.Find(&invitationList)
		log.Info("query invitationList size=" + fmt.Sprint(len(invitationList)))
		for _, invitationRecord := range invitationList {
			if _, ok := resultMap[invitationRecord.SrcUserID]; !ok {
				resultMap[invitationRecord.SrcUserID] = 1
			} else {
				resultMap[invitationRecord.SrcUserID] += 1
			}
		}
		indexTotal += PAGE_SIZE
		if indexTotal >= count {
			break
		}
	}
	log.Info("invitationList size=" + fmt.Sprint(len(resultMap)))
	return resultMap
}

func setMapKey(key string, userId int64, value int, resultItemMap map[string]int) {
newKey := fmt.Sprint(userId) + "_" + key
if _, ok := resultItemMap[newKey]; !ok {


+ 14
- 7
models/user_business_struct.go View File

@@ -66,7 +66,8 @@ type UserBusinessAnalysisCurrentYear struct {
CollectedImage int `xorm:"NOT NULL DEFAULT 0"`
RecommendImage int `xorm:"NOT NULL DEFAULT 0"`

Phone string `xorm:"NULL"`
Phone string `xorm:"NULL"`
InvitationUserNum int `xorm:"NOT NULL DEFAULT 0"`
}

type UserBusinessAnalysisLast30Day struct {
@@ -133,7 +134,8 @@ type UserBusinessAnalysisLast30Day struct {
CollectedImage int `xorm:"NOT NULL DEFAULT 0"`
RecommendImage int `xorm:"NOT NULL DEFAULT 0"`

Phone string `xorm:"NULL"`
Phone string `xorm:"NULL"`
InvitationUserNum int `xorm:"NOT NULL DEFAULT 0"`
}

type UserBusinessAnalysisLastMonth struct {
@@ -200,7 +202,8 @@ type UserBusinessAnalysisLastMonth struct {
CollectedImage int `xorm:"NOT NULL DEFAULT 0"`
RecommendImage int `xorm:"NOT NULL DEFAULT 0"`

Phone string `xorm:"NULL"`
Phone string `xorm:"NULL"`
InvitationUserNum int `xorm:"NOT NULL DEFAULT 0"`
}

type UserBusinessAnalysisCurrentMonth struct {
@@ -267,7 +270,8 @@ type UserBusinessAnalysisCurrentMonth struct {
CollectedImage int `xorm:"NOT NULL DEFAULT 0"`
RecommendImage int `xorm:"NOT NULL DEFAULT 0"`

Phone string `xorm:"NULL"`
Phone string `xorm:"NULL"`
InvitationUserNum int `xorm:"NOT NULL DEFAULT 0"`
}

type UserBusinessAnalysisCurrentWeek struct {
@@ -335,7 +339,8 @@ type UserBusinessAnalysisCurrentWeek struct {
CollectedImage int `xorm:"NOT NULL DEFAULT 0"`
RecommendImage int `xorm:"NOT NULL DEFAULT 0"`

Phone string `xorm:"NULL"`
Phone string `xorm:"NULL"`
InvitationUserNum int `xorm:"NOT NULL DEFAULT 0"`
}

type UserBusinessAnalysisYesterday struct {
@@ -403,7 +408,8 @@ type UserBusinessAnalysisYesterday struct {
CollectedImage int `xorm:"NOT NULL DEFAULT 0"`
RecommendImage int `xorm:"NOT NULL DEFAULT 0"`

Phone string `xorm:"NULL"`
Phone string `xorm:"NULL"`
InvitationUserNum int `xorm:"NOT NULL DEFAULT 0"`
}

type UserBusinessAnalysisLastWeek struct {
@@ -471,7 +477,8 @@ type UserBusinessAnalysisLastWeek struct {
CollectedImage int `xorm:"NOT NULL DEFAULT 0"`
RecommendImage int `xorm:"NOT NULL DEFAULT 0"`

Phone string `xorm:"NULL"`
Phone string `xorm:"NULL"`
InvitationUserNum int `xorm:"NOT NULL DEFAULT 0"`
}

type UserAnalysisPara struct {


+ 102
- 0
models/user_invitation.go View File

@@ -0,0 +1,102 @@
package models

import (
"fmt"

"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/timeutil"
)

// Invitation records that one user (SrcUserID) invited another (UserID),
// identified additionally by the invitee's phone number. Fields tagged
// `xorm:"-"` (Avatar, Name, InvitationUserNum, IsActive) are not persisted;
// callers fill them in for display purposes.
type Invitation struct {
	ID                int64              `xorm:"pk autoincr"`
	SrcUserID         int64              `xorm:"NOT NULL DEFAULT 0"`
	UserID            int64              `xorm:"NOT NULL DEFAULT 0"`
	Phone             string             `xorm:"INDEX"`
	Avatar            string             `xorm:"-"`
	Name              string             `xorm:"-"`
	InvitationUserNum int                `xorm:"-"`
	IsActive          bool               `xorm:"-"`
	CreatedUnix       timeutil.TimeStamp `xorm:"created"`
}

// QueryInvitaionByPhone returns all invitation records whose phone column
// equals the given phone number, or nil if the query fails.
func QueryInvitaionByPhone(phone string) []*Invitation {
	statictisSess := xStatistic.NewSession()
	defer statictisSess.Close()
	invitationList := make([]*Invitation, 0)
	// Bind the phone number as a parameter instead of concatenating it into
	// the SQL text: phone originates from request input, so a string-built
	// condition here would be injectable.
	if err := statictisSess.Table(new(Invitation)).Where("phone = ?", phone).
		Find(&invitationList); err != nil {
		return nil
	}
	return invitationList
}

// GetAllUserName returns a map from user id to user name for every row in
// the user table. On query failure it logs the error and returns the empty
// map rather than nil, so callers can index it safely.
func GetAllUserName() map[int64]string {
	sess := x.NewSession()
	defer sess.Close()
	userList := make([]*User, 0)
	reMap := make(map[int64]string)
	// The original ignored the Find error, silently returning an empty map;
	// log it so lookup failures are visible.
	if err := sess.Select("id,name").Table("user").Find(&userList); err != nil {
		log.Info("query user error." + err.Error())
		return reMap
	}
	for _, user := range userList {
		reMap[user.ID] = user.Name
	}
	return reMap
}

// QueryInvitaionPage returns one page of invitation records ordered by
// creation time (newest first), together with the total record count.
// Returns (nil, 0) on any query error.
func QueryInvitaionPage(start int, pageSize int) ([]*Invitation, int64) {
	statictisSess := xStatistic.NewSession()
	defer statictisSess.Close()

	allCount, err := statictisSess.Count(new(Invitation))
	if err != nil {
		log.Info("query error." + err.Error())
		return nil, 0
	}
	invitationList := make([]*Invitation, 0)
	if err := statictisSess.Table(new(Invitation)).OrderBy("created_unix desc").Limit(pageSize, start).
		Find(&invitationList); err != nil {
		// Log instead of silently dropping the error, matching the Count path.
		log.Info("query error." + err.Error())
		return nil, 0
	}
	return invitationList, allCount
}

// QueryInvitaionByTime returns all invitation records created within
// [startTime, endTime], newest first, or nil if the query fails.
func QueryInvitaionByTime(startTime int64, endTime int64) []*Invitation {
	statictisSess := xStatistic.NewSession()
	defer statictisSess.Close()
	invitationList := make([]*Invitation, 0)
	// Bound parameters replace the hand-formatted condition string; the
	// values are int64 so this is an idiom fix, not a security one.
	if err := statictisSess.Table(new(Invitation)).
		Where("created_unix >= ? and created_unix <= ?", startTime, endTime).
		OrderBy("created_unix desc").
		Find(&invitationList); err != nil {
		log.Info("query error." + err.Error())
		return nil
	}
	return invitationList
}

// InsertInvitaion persists a single invitation record to the statistics DB.
func InsertInvitaion(invitationUser *Invitation) error {
	sess := xStatistic.NewSession()
	defer sess.Close()
	_, err := sess.Insert(invitationUser)
	return err
}

// QueryInvitaionBySrcUserId returns one page of the invitations created by
// the given inviter, newest first, plus the inviter's total invitation
// count. Returns (nil, 0) on any query error.
func QueryInvitaionBySrcUserId(srcUserId int64, start int, pageSize int) ([]*Invitation, int64) {
	statictisSess := xStatistic.NewSession()
	defer statictisSess.Close()
	// Bound parameter instead of formatting the id into the SQL text.
	allCount, err := statictisSess.Where("src_user_id = ?", srcUserId).Count(new(Invitation))
	if err != nil {
		log.Info("query error." + err.Error())
		return nil, 0
	}
	invitationList := make([]*Invitation, 0)

	if err := statictisSess.Table(new(Invitation)).Where("src_user_id = ?", srcUserId).OrderBy("created_unix desc").Limit(pageSize, start).
		Find(&invitationList); err != nil {
		// Log instead of silently dropping the error, matching the Count path.
		log.Info("query error." + err.Error())
		return nil, 0
	}
	return invitationList, allCount
}

+ 4
- 0
models/wechat_bind.go View File

@@ -96,3 +96,7 @@ func UnbindWechatOpenId(userId int64, oldWechatOpenID string) error {
sess.Insert(logParam)
return sess.Commit()
}

// CountWechatBindLog counts wechat bind-log rows recorded for the given
// wechat openid and bind action.
func CountWechatBindLog(wechatOpenId string, action WechatBindAction) (int64, error) {
	return x.Where("wechat_open_id = ? and action = ?", wechatOpenId, action).Count(&WechatBindLog{})
}

+ 5
- 0
modules/auth/cloudbrain.go View File

@@ -23,6 +23,11 @@ type CreateCloudBrainForm struct {
BootFile string `form:"boot_file"`
Params string `form:"run_para_list"`
BranchName string `form:"branch_name"`
ModelName string `form:"model_name"`
ModelVersion string `form:"model_version"`
CkptName string `form:"ckpt_name"`
LabelName string `form:"label_names"`
PreTrainModelUrl string `form:"pre_train_model_url"`
DatasetName string `form:"dataset_name"`
SpecId int64 `form:"spec_id"`
}


+ 5
- 0
modules/auth/grampus.go View File

@@ -18,6 +18,11 @@ type CreateGrampusTrainJobForm struct {
WorkServerNumber int `form:"work_server_number" binding:"Required"`
Image string `form:"image"`
DatasetName string `form:"dataset_name"`
ModelName string `form:"model_name"`
ModelVersion string `form:"model_version"`
CkptName string `form:"ckpt_name"`
LabelName string `form:"label_names"`
PreTrainModelUrl string `form:"pre_train_model_url"`
SpecId int64 `form:"spec_id"`
}



+ 6
- 0
modules/auth/modelarts.go View File

@@ -33,6 +33,7 @@ type CreateModelArtsTrainJobForm struct {
DisplayJobName string `form:"display_job_name" binding:"Required"`
JobName string `form:"job_name" binding:"Required"`
Attachment string `form:"attachment" binding:"Required"`
DatasetName string `form:"dataset_name"`
BootFile string `form:"boot_file" binding:"Required"`
WorkServerNumber int `form:"work_server_number" binding:"Required"`
EngineID int `form:"engine_id" binding:"Required"`
@@ -48,6 +49,11 @@ type CreateModelArtsTrainJobForm struct {
FlavorName string `form:"flaver_names" binding:"Required"`
EngineName string `form:"engine_names" binding:"Required"`
SpecId int64 `form:"spec_id" binding:"Required"`
ModelName string `form:"model_name"`
ModelVersion string `form:"model_version"`
CkptName string `form:"ckpt_name"`
LabelName string `form:"label_names"`
PreTrainModelUrl string `form:"pre_train_model_url"`
}

type CreateModelArtsInferenceJobForm struct {


+ 1
- 1
modules/auth/user_form.go View File

@@ -372,7 +372,7 @@ func (f *U2FDeleteForm) Validate(ctx *macaron.Context, errs binding.Errors) bind

type PhoneNumberForm struct {
PhoneNumber string `binding:"Required;MaxSize(20)"`
Mode int `binding:"Required"`
Mode int `binding:"Required"`
SlideID string `binding:"Required;MaxSize(100)"`
}



+ 11
- 8
modules/auth/wechat/access_token.go View File

@@ -1,20 +1,19 @@
package wechat

import (
"time"

"code.gitea.io/gitea/modules/redis/redis_client"
"code.gitea.io/gitea/modules/redis/redis_key"
"code.gitea.io/gitea/modules/redis/redis_lock"
"time"
)

const EMPTY_REDIS_VAL = "Nil"

var accessTokenLock = redis_lock.NewDistributeLock(redis_key.AccessTokenLockKey())

func GetWechatAccessToken() string {
token, _ := redis_client.Get(redis_key.WechatAccessTokenKey())
if token != "" {
if token == EMPTY_REDIS_VAL {
if token == redis_key.EMPTY_REDIS_VAL {
return ""
}
live, _ := redis_client.TTL(redis_key.WechatAccessTokenKey())
@@ -28,18 +27,22 @@ func GetWechatAccessToken() string {
}

func refreshAccessToken() {
if ok := accessTokenLock.Lock(3 * time.Second); ok {
if ok, _ := accessTokenLock.Lock(3 * time.Second); ok {
defer accessTokenLock.UnLock()
callAccessTokenAndUpdateCache()
}
}

func refreshAndGetAccessToken() string {
if ok := accessTokenLock.LockWithWait(3*time.Second, 3*time.Second); ok {
isOk, err := accessTokenLock.LockWithWait(3*time.Second, 3*time.Second)
if err != nil {
return ""
}
if isOk {
defer accessTokenLock.UnLock()
token, _ := redis_client.Get(redis_key.WechatAccessTokenKey())
if token != "" {
if token == EMPTY_REDIS_VAL {
if token == redis_key.EMPTY_REDIS_VAL {
return ""
}
return token
@@ -59,7 +62,7 @@ func callAccessTokenAndUpdateCache() string {
}

if token == "" {
redis_client.Setex(redis_key.WechatAccessTokenKey(), EMPTY_REDIS_VAL, 10*time.Second)
redis_client.Setex(redis_key.WechatAccessTokenKey(), redis_key.EMPTY_REDIS_VAL, 10*time.Second)
return ""
}
redis_client.Setex(redis_key.WechatAccessTokenKey(), token, time.Duration(r.Expires_in)*time.Second)


+ 3
- 3
modules/auth/wechat/bind.go View File

@@ -38,7 +38,7 @@ func (err WechatBindError) Error() string {
}

func BindWechat(userId int64, wechatOpenId string) error {
if !IsWechatAccountAvailable(userId, wechatOpenId) {
if !IsWechatAccountUsed(userId, wechatOpenId) {
log.Error("bind wechat failed, because user use wrong wechat account to bind,userId=%d wechatOpenId=%s", userId, wechatOpenId)
return NewWechatBindError(BIND_REPLY_WECHAT_ACCOUNT_USED)
}
@@ -60,9 +60,9 @@ func IsUserAvailableForWechatBind(userId int64, wechatOpenId string) bool {
return currentOpenId == "" || currentOpenId == wechatOpenId
}

//IsWechatAccountAvailable if wechat account used by another account,return false
//IsWechatAccountUsed if wechat account used by another account,return false
//if wechat account not used or used by the given user,return true
func IsWechatAccountAvailable(userId int64, wechatOpenId string) bool {
func IsWechatAccountUsed(userId int64, wechatOpenId string) bool {
user := models.GetUserByWechatOpenId(wechatOpenId)
if user != nil && user.WechatOpenId != "" && user.ID != userId {
return false


+ 2
- 0
modules/auth/wechat/client.go View File

@@ -95,6 +95,7 @@ func getWechatRestyClient() *resty.Client {
func callAccessToken() *AccessTokenResponse {
client := getWechatRestyClient()

log.Info("start to get wechat access token")
var result AccessTokenResponse
_, err := client.R().
SetQueryParam("grant_type", GRANT_TYPE).
@@ -106,6 +107,7 @@ func callAccessToken() *AccessTokenResponse {
log.Error("get wechat access token failed,e=%v", err)
return nil
}
log.Info("get wechat access token result=%v", result)
return &result
}



+ 14
- 0
modules/cloudbrain/cloudbrain.go View File

@@ -24,6 +24,7 @@ const (
CodeMountPath = "/code"
DataSetMountPath = "/dataset"
ModelMountPath = "/model"
PretrainModelMountPath = "/pretrainmodel"
LogFile = "log.txt"
BenchMarkMountPath = "/benchmark"
BenchMarkResourceID = 1
@@ -77,6 +78,8 @@ type GenerateCloudBrainTaskReq struct {
ModelVersion string
CkptName string
LabelName string
PreTrainModelPath string
PreTrainModelUrl string
Spec *models.Specification
}

@@ -276,6 +279,16 @@ func GenerateTask(req GenerateCloudBrainTaskReq) error {
},
}

if req.PreTrainModelUrl != "" { //预训练
volumes = append(volumes, models.Volume{
HostPath: models.StHostPath{
Path: req.PreTrainModelPath,
MountPath: PretrainModelMountPath,
ReadOnly: true,
},
})
}

if len(req.DatasetInfos) == 1 {
volumes = append(volumes, models.Volume{
HostPath: models.StHostPath{
@@ -359,6 +372,7 @@ func GenerateTask(req GenerateCloudBrainTaskReq) error {
CkptName: req.CkptName,
ResultUrl: req.ResultPath,
LabelName: req.LabelName,
PreTrainModelUrl: req.PreTrainModelUrl,
CreatedUnix: createTime,
UpdatedUnix: createTime,
CommitID: req.CommitID,


+ 8
- 4
modules/cloudbrain/resty.go View File

@@ -1,6 +1,7 @@
package cloudbrain

import (
"code.gitea.io/gitea/modules/notification"
"encoding/json"
"errors"
"fmt"
@@ -25,10 +26,10 @@ var (

const (
JobHasBeenStopped = "S410"
errInvalidToken = "S401"
Public = "public"
Custom = "custom"
LogPageSize = 500
errInvalidToken = "S401"
LogPageTokenExpired = "5m"
pageSize = 15
QueuesDetailUrl = "/rest-server/api/v2/queuesdetail"
@@ -144,7 +145,6 @@ sendjob:
if jobResult.Code != Success {
return &jobResult, fmt.Errorf("jobResult err: %s", res.String())
}

return &jobResult, nil
}

@@ -235,7 +235,7 @@ func getQueryString(page int, size int, name string) string {
return fmt.Sprintf("pageIndex=%d&pageSize=%d&name=%s", page, size, name)
}

func CommitImage(jobID string, params models.CommitImageParams) error {
func CommitImage(jobID string, params models.CommitImageParams, doer *models.User) error {
imageTag := strings.TrimSpace(params.ImageTag)

dbImage, err := models.GetImageByTag(imageTag)
@@ -340,11 +340,12 @@ sendjob:
})
if err == nil {
go updateImageStatus(image, isSetCreatedUnix, createTime)
notification.NotifyCreateImage(doer, image)
}
return err
}

func CommitAdminImage(params models.CommitImageParams) error {
func CommitAdminImage(params models.CommitImageParams, doer *models.User) error {
imageTag := strings.TrimSpace(params.ImageTag)
exist, err := models.IsImageExist(imageTag)

@@ -381,6 +382,9 @@ func CommitAdminImage(params models.CommitImageParams) error {
}
return nil
})
if err == nil {
notification.NotifyCreateImage(doer, image)
}
return err
}



+ 21
- 0
modules/context/point.go View File

@@ -0,0 +1,21 @@
package context

import (
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/services/reward/point/account"
"gitea.com/macaron/macaron"
)

// PointAccount returns a macaron handler that loads the request user's
// reward point account into ctx.Data["PointAccount"], together with the
// global CloudBrainPaySwitch flag, for use by downstream handlers/templates.
// NOTE(review): ctx.User is dereferenced unconditionally — this middleware
// presumably only runs behind a sign-in requirement; confirm route wiring.
func PointAccount() macaron.Handler {
	return func(ctx *Context) {
		a, err := account.GetAccount(ctx.User.ID)
		if err != nil {
			ctx.ServerError("GetPointAccount", err)
			return
		}
		ctx.Data["PointAccount"] = a
		ctx.Data["CloudBrainPaySwitch"] = setting.CloudBrainPaySwitch
		ctx.Next()
	}
}

+ 26
- 0
modules/cron/tasks_basic.go View File

@@ -5,6 +5,7 @@
package cron

import (
"code.gitea.io/gitea/services/reward"
"code.gitea.io/gitea/services/cloudbrain/resource"
"code.gitea.io/gitea/modules/modelarts"
"context"
@@ -209,6 +210,28 @@ func registerSyncCloudbrainStatus() {
})
}

// registerRewardPeriodTask registers a cron job that runs the periodic
// reward task every minute, firing once immediately at startup.
func registerRewardPeriodTask() {
	RegisterTaskFatal("reward_period_task", &BaseConfig{
		Enabled:    true,
		RunAtStart: true,
		Schedule:   "@every 1m",
	}, func(ctx context.Context, _ *models.User, _ Config) error {
		reward.StartRewardTask()
		return nil
	})
}

// registerCloudbrainPointDeductTask registers a cron job that deducts
// cloudbrain points every minute, firing once immediately at startup.
func registerCloudbrainPointDeductTask() {
	RegisterTaskFatal("cloudbrain_point_deduct_task", &BaseConfig{
		Enabled:    true,
		RunAtStart: true,
		Schedule:   "@every 1m",
	}, func(ctx context.Context, _ *models.User, _ Config) error {
		reward.StartCloudbrainPointDeductTask()
		return nil
	})
}

func registerSyncResourceSpecs() {
RegisterTaskFatal("sync_grampus_specs", &BaseConfig{
Enabled: true,
@@ -253,4 +276,7 @@ func initBasicTasks() {
registerHandleOrgStatistic()
registerSyncResourceSpecs()
registerSyncModelArtsTempJobs()

//registerRewardPeriodTask()
registerCloudbrainPointDeductTask()
}

+ 25
- 1
modules/dataset/dataset.go View File

@@ -1,6 +1,10 @@
package dataset

import "code.gitea.io/gitea/models"
import (
"strings"

"code.gitea.io/gitea/models"
)

func GetResourceType(cloudbrainType int) string {
if cloudbrainType == 0 {
@@ -33,3 +37,23 @@ func IsShowDataSetOfCurrentRepo(repoID int64) bool {
return true

}

// GetFilterDeletedAttachments filters a ";"-separated uuid list down to the
// attachments that still exist, returning the surviving uuids and their file
// names, each re-joined with ";". Both results are empty strings when the
// lookup fails.
func GetFilterDeletedAttachments(uuids string) (string, string) {
	attachments, err := models.GetAttachmentsByUUIDs(strings.Split(uuids, ";"))
	if err != nil {
		return "", ""
	}
	// Collect into pre-sized slices and join once, instead of growing two
	// strings with += inside the loop.
	uuidList := make([]string, 0, len(attachments))
	nameList := make([]string, 0, len(attachments))
	for _, attachment := range attachments {
		uuidList = append(uuidList, attachment.UUID)
		nameList = append(nameList, attachment.Name)
	}
	return strings.Join(uuidList, ";"), strings.Join(nameList, ";")
}

+ 22
- 0
modules/eventsource/manager_run.go View File

@@ -5,6 +5,7 @@
package eventsource

import (
"code.gitea.io/gitea/services/reward"
"context"
"time"

@@ -24,9 +25,29 @@ func (m *Manager) Init() {
func (m *Manager) Run(ctx context.Context) {
then := timeutil.TimeStampNow().Add(-2)
timer := time.NewTicker(setting.UI.Notification.EventSourceUpdateTime)
rewardThen := then
rewardTimer := time.NewTicker(setting.UI.Notification.RewardNotifyUpdateTime)
loop:
for {
select {
case <-rewardTimer.C:
log.Debug("rewardTimer run")
now := timeutil.TimeStampNow().Add(-2)
list := reward.GetRewardOperation(rewardThen, now)
if list != nil {
log.Debug("GetRewardOperation list=%v", list)
for _, l := range list {
m.SendMessage(l.UserId, &Event{
Name: "reward-operation",
Data: l.Msg,
})
}
}

rewardThen = now
}

select {
case <-ctx.Done():
timer.Stop()
break loop
@@ -44,6 +65,7 @@ loop:
})
}
then = now
default:
}
}
m.UnregisterAll()


+ 56
- 5
modules/grampus/grampus.go View File

@@ -22,9 +22,6 @@ const (
GpuWorkDir = "/tmp/"
NpuWorkDir = "/cache/"

CommandPrepareScript = ";mkdir -p output;mkdir -p code;mkdir -p dataset;echo \"start loading script\";wget -q https://git.openi.org.cn/OpenIOSSG/script_for_grampus/archive/master.zip;" +
"echo \"finish loading script\";unzip -q master.zip;cd script_for_grampus;chmod 777 downloader_for_obs uploader_for_npu downloader_for_minio uploader_for_gpu;"

CodeArchiveName = "master.zip"
)

@@ -34,6 +31,9 @@ var (
ImageInfos *setting.StImageInfosModelArts

SpecialPools *models.SpecialPools

CommandPrepareScript = ";mkdir -p output;mkdir -p code;mkdir -p dataset;mkdir -p pretrainmodel;echo \"start loading script\";wget -q https://git.openi.org.cn/OpenIOSSG/%s/archive/master.zip;" +
"echo \"finish loading script\";unzip -q master.zip;cd %s;chmod 777 downloader_for_obs uploader_for_npu downloader_for_minio uploader_for_gpu;"
)

type GenerateTrainJobReq struct {
@@ -62,16 +62,60 @@ type GenerateTrainJobReq struct {
TotalVersionCount int
ComputeResource string
ProcessType string
DatasetName string

DatasetNames string
DatasetInfos map[string]models.DatasetInfo
Params string
ModelName string
LabelName string
CkptName string
ModelVersion string
PreTrainModelPath string
PreTrainModelUrl string
Spec *models.Specification
}

// getEndPoint strips the scheme prefix (everything up to and including
// "//", e.g. "https://") from the configured storage endpoint.
func getEndPoint() string {
	// strings.Index returns -1 when "//" is absent; the original
	// unconditional slice Endpoint[index+2:] would then silently drop the
	// first character. Return the value unchanged in that case.
	index := strings.Index(setting.Endpoint, "//")
	if index < 0 {
		return setting.Endpoint
	}
	return setting.Endpoint[index+2:]
}

// getDatasetGrampus converts the resolved dataset infos into Grampus dataset
// descriptors pointing at the configured object-storage bucket.
// NOTE(review): map iteration order is random, so the resulting dataset
// order varies between calls — confirm the Grampus API is order-insensitive.
func getDatasetGrampus(datasetInfos map[string]models.DatasetInfo) []models.GrampusDataset {
	endPoint := getEndPoint()
	// Pre-size: the result has exactly one entry per dataset info.
	datasetGrampus := make([]models.GrampusDataset, 0, len(datasetInfos))
	for _, datasetInfo := range datasetInfos {
		datasetGrampus = append(datasetGrampus, models.GrampusDataset{
			Name:      datasetInfo.FullName,
			Bucket:    setting.Bucket,
			EndPoint:  endPoint,
			ObjectKey: datasetInfo.DataLocalPath + datasetInfo.FullName,
		})
	}
	return datasetGrampus
}

func GenerateTrainJob(ctx *context.Context, req *GenerateTrainJobReq) (err error) {
createTime := timeutil.TimeStampNow()

centerID, centerName := getCentersParamter(ctx, req)

var datasetGrampus, modelGrampus []models.GrampusDataset
if ProcessorTypeNPU == req.ProcessType {
datasetGrampus = getDatasetGrampus(req.DatasetInfos)
if len(req.ModelName) != 0 {
modelGrampus = []models.GrampusDataset{
{
Name: req.ModelName,
Bucket: setting.Bucket,
EndPoint: getEndPoint(),
ObjectKey: req.PreTrainModelPath,
},
}
}
}

jobResult, err := createJob(models.CreateGrampusJobRequest{
Name: req.JobName,
Tasks: []models.GrampusTasks{
@@ -84,6 +128,8 @@ func GenerateTrainJob(ctx *context.Context, req *GenerateTrainJobReq) (err error
CenterID: centerID,
CenterName: centerName,
ReplicaNum: 1,
Datasets: datasetGrampus,
Models: modelGrampus,
},
},
})
@@ -103,7 +149,7 @@ func GenerateTrainJob(ctx *context.Context, req *GenerateTrainJobReq) (err error
JobType: string(models.JobTypeTrain),
Type: models.TypeC2Net,
Uuid: req.Uuid,
DatasetName: req.DatasetName,
DatasetName: req.DatasetNames,
CommitID: req.CommitID,
IsLatestVersion: req.IsLatestVersion,
ComputeResource: req.ComputeResource,
@@ -121,6 +167,11 @@ func GenerateTrainJob(ctx *context.Context, req *GenerateTrainJobReq) (err error
CreatedUnix: createTime,
UpdatedUnix: createTime,
Spec: req.Spec,
ModelName: req.ModelName,
ModelVersion: req.ModelVersion,
LabelName: req.LabelName,
PreTrainModelUrl: req.PreTrainModelUrl,
CkptName: req.CkptName,
})

if err != nil {


+ 6
- 5
modules/grampus/resty.go View File

@@ -1,14 +1,15 @@
package grampus

import (
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
"crypto/tls"
"encoding/json"
"fmt"
"github.com/go-resty/resty/v2"
"net/http"

"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
"github.com/go-resty/resty/v2"
)

var (
@@ -236,7 +237,7 @@ func GetTrainJobLog(jobID string) (string, error) {
return logContent, fmt.Errorf("json.Unmarshal failed(%s): %v", res.String(), err.Error())
}
log.Error("GetTrainJobLog failed(%d):%s(%s)", res.StatusCode(), temp.ErrorCode, temp.ErrorMsg)
return logContent, fmt.Errorf("GetTrainJobLog failed(%d):%s(%s)", res.StatusCode(), temp.ErrorCode, temp.ErrorMsg)
return logContent, fmt.Errorf("GetTrainJobLog failed(%d):%d(%s)", res.StatusCode(), temp.ErrorCode, temp.ErrorMsg)
}

logContent = res.String()


+ 18
- 2
modules/modelarts/modelarts.go View File

@@ -104,6 +104,11 @@ type GenerateTrainJobReq struct {
UserCommand string
DatasetName string
Spec *models.Specification
ModelName string
LabelName string
CkptName string
ModelVersion string
PreTrainModelUrl string
}

type GenerateInferenceJobReq struct {
@@ -148,8 +153,9 @@ type VersionInfo struct {

type Flavor struct {
Info []struct {
Code string `json:"code"`
Value string `json:"value"`
Code string `json:"code"`
Value string `json:"value"`
UnitPrice int64 `json:"unitPrice"`
} `json:"flavor"`
}

@@ -439,6 +445,11 @@ func GenerateTrainJob(ctx *context.Context, req *GenerateTrainJobReq) (err error
CreatedUnix: createTime,
UpdatedUnix: createTime,
Spec: req.Spec,
ModelName: req.ModelName,
ModelVersion: req.ModelVersion,
LabelName: req.LabelName,
PreTrainModelUrl: req.PreTrainModelUrl,
CkptName: req.CkptName,
})

if createErr != nil {
@@ -588,6 +599,11 @@ func GenerateTrainJobVersion(ctx *context.Context, req *GenerateTrainJobReq, job
CreatedUnix: createTime,
UpdatedUnix: createTime,
Spec: req.Spec,
ModelName: req.ModelName,
ModelVersion: req.ModelVersion,
LabelName: req.LabelName,
PreTrainModelUrl: req.PreTrainModelUrl,
CkptName: req.CkptName,
})
if createErr != nil {
log.Error("CreateCloudbrain(%s) failed:%v", req.JobName, createErr.Error())


+ 76
- 0
modules/notification/action/action.go View File

@@ -5,6 +5,7 @@
package action

import (
"code.gitea.io/gitea/modules/auth"
"encoding/json"
"fmt"
"path"
@@ -345,3 +346,78 @@ func (a *actionNotifier) NotifyOtherTask(doer *models.User, repo *models.Reposit
log.Error("notifyWatchers: %v", err)
}
}

// NotifyWechatBind records a private action noting that the user bound the
// given wechat openid.
// Receiver renamed t -> a for consistency with the other actionNotifier
// methods in this file.
func (a *actionNotifier) NotifyWechatBind(user *models.User, wechatOpenId string) {
	act := &models.Action{
		ActUserID: user.ID,
		ActUser:   user,
		OpType:    models.ActionBindWechat,
		IsPrivate: true,
		Content:   wechatOpenId,
	}
	if err := models.NotifyWatchers(act); err != nil {
		log.Error("notifyWatchers: %v", err)
	}
}

// NotifyDatasetRecommend records a public action when a dataset is
// recommended; any other action value is ignored.
// Receiver renamed t -> a for consistency with the other actionNotifier
// methods in this file.
func (a *actionNotifier) NotifyDatasetRecommend(optUser *models.User, dataset *models.Dataset, action string) {
	// Only the "recommend" action produces an activity entry.
	if action != "recommend" {
		return
	}
	act := &models.Action{
		OpType:    models.ActionDatasetRecommended,
		ActUserID: dataset.UserID,
		RepoID:    dataset.RepoID,
		IsPrivate: false,
		Content:   fmt.Sprintf("%d|%s", dataset.ID, dataset.Title),
	}
	if err := models.NotifyWatchers(act); err != nil {
		log.Error("notifyWatchers: %v", err)
	}
}

// NotifyCreateImage records an action for a newly committed image; privacy
// of the action follows the image's own privacy flag.
// Receiver renamed t -> a for consistency with the other actionNotifier
// methods in this file.
func (a *actionNotifier) NotifyCreateImage(doer *models.User, image models.Image) {
	act := &models.Action{
		ActUserID: doer.ID,
		ActUser:   doer,
		OpType:    models.ActionCreateImage,
		IsPrivate: image.IsPrivate,
		Content:   fmt.Sprintf("%d|%s", image.ID, image.Tag),
	}
	if err := models.NotifyWatchers(act); err != nil {
		log.Error("notifyWatchers: %v", err)
	}
}

// NotifyImageRecommend records a public action, attributed to the image's
// owner, when an image is recommended; any other action value is ignored.
// Receiver renamed t -> a for consistency with the other actionNotifier
// methods in this file.
func (a *actionNotifier) NotifyImageRecommend(optUser *models.User, image *models.Image, action string) {
	// Check the action first so the owner lookup is skipped entirely for
	// non-recommend actions (the original always hit the DB).
	if action != "recommend" {
		return
	}
	u, err := models.GetUserByID(image.UID)
	if err != nil {
		return
	}
	act := &models.Action{
		ActUserID: u.ID,
		ActUser:   u,
		OpType:    models.ActionImageRecommend,
		IsPrivate: false,
		Content:   fmt.Sprintf("%d|%s", image.ID, image.Tag),
	}
	if err := models.NotifyWatchers(act); err != nil {
		log.Error("notifyWatchers: %v", err)
	}
}

// NotifyChangeUserAvatar records a private action noting that the user
// changed their avatar. The form argument is currently unused here.
// Receiver renamed t -> a for consistency with the other actionNotifier
// methods in this file.
func (a *actionNotifier) NotifyChangeUserAvatar(user *models.User, form auth.AvatarForm) {
	act := &models.Action{
		ActUserID: user.ID,
		ActUser:   user,
		OpType:    models.ActionChangeUserAvatar,
		IsPrivate: true,
	}
	if err := models.NotifyWatchers(act); err != nil {
		log.Error("notifyWatchers: %v", err)
	}
}

+ 6
- 0
modules/notification/base/notifier.go View File

@@ -6,6 +6,7 @@ package base

import (
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/auth"
"code.gitea.io/gitea/modules/repository"
)

@@ -56,6 +57,11 @@ type Notifier interface {
NotifySyncDeleteRef(doer *models.User, repo *models.Repository, refType, refFullName string)

NotifyOtherTask(doer *models.User, repo *models.Repository, id string, name string, optype models.ActionType)
NotifyWechatBind(user *models.User, wechatOpenId string)
NotifyDatasetRecommend(optUser *models.User, dataset *models.Dataset, action string)
NotifyCreateImage(doer *models.User, image models.Image)
NotifyImageRecommend(optUser *models.User, image *models.Image, action string)
NotifyChangeUserAvatar(user *models.User, form auth.AvatarForm)

NotifyChangeCloudbrainStatus(cloudbrain *models.Cloudbrain, oldStatus string)
}

+ 18
- 0
modules/notification/base/null.go View File

@@ -6,6 +6,7 @@ package base

import (
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/auth"
"code.gitea.io/gitea/modules/repository"
)

@@ -159,6 +160,23 @@ func (*NullNotifier) NotifyOtherTask(doer *models.User, repo *models.Repository,

}

// NotifyWechatBind is a no-op; concrete notifiers override it.
func (*NullNotifier) NotifyWechatBind(user *models.User, wechatOpenId string) {

}

// NotifyDatasetRecommend is a no-op; concrete notifiers override it.
func (*NullNotifier) NotifyDatasetRecommend(optUser *models.User, dataset *models.Dataset, action string) {
}

// NotifyCreateImage is a no-op; concrete notifiers override it.
func (*NullNotifier) NotifyCreateImage(doer *models.User, image models.Image) {
}

// NotifyImageRecommend is a no-op; concrete notifiers override it.
func (*NullNotifier) NotifyImageRecommend(optUser *models.User, image *models.Image, action string) {
}

// NotifyChangeUserAvatar is a no-op; concrete notifiers override it.
func (*NullNotifier) NotifyChangeUserAvatar(user *models.User, form auth.AvatarForm) {

}

// NotifyChangeCloudbrainStatus is a no-op; concrete notifiers override it.
func (*NullNotifier) NotifyChangeCloudbrainStatus(cloudbrain *models.Cloudbrain, oldStatus string) {

}

+ 38
- 0
modules/notification/notification.go View File

@@ -6,10 +6,12 @@ package notification

import (
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/auth"
"code.gitea.io/gitea/modules/notification/action"
"code.gitea.io/gitea/modules/notification/base"
"code.gitea.io/gitea/modules/notification/indexer"
"code.gitea.io/gitea/modules/notification/mail"
"code.gitea.io/gitea/modules/notification/reward"
"code.gitea.io/gitea/modules/notification/ui"
"code.gitea.io/gitea/modules/notification/webhook"
wechatNotifier "code.gitea.io/gitea/modules/notification/wechat"
@@ -37,6 +39,7 @@ func NewContext() {
RegisterNotifier(webhook.NewNotifier())
RegisterNotifier(action.NewNotifier())
RegisterNotifier(wechatNotifier.NewNotifier())
RegisterNotifier(reward.NewNotifier())
}

// NotifyUploadAttachment notifies attachment upload message to notifiers
@@ -272,6 +275,41 @@ func NotifySyncDeleteRef(pusher *models.User, repo *models.Repository, refType,
}
}

// NotifyWechatBind broadcasts to all registered notifiers that user bound
// the given wechat open id.
func NotifyWechatBind(user *models.User, wechatOpenId string) {
	for _, notifier := range notifiers {
		notifier.NotifyWechatBind(user, wechatOpenId)
	}
}

// NotifyDatasetRecommend broadcasts a dataset recommend/unrecommend action
// to all registered notifiers.
func NotifyDatasetRecommend(optUser *models.User, dataset *models.Dataset, action string) {
	for _, notifier := range notifiers {
		notifier.NotifyDatasetRecommend(optUser, dataset, action)
	}
}

// NotifyCreateImage broadcasts to all registered notifiers that doer
// committed a new image.
func NotifyCreateImage(doer *models.User, image models.Image) {
	for _, notifier := range notifiers {
		notifier.NotifyCreateImage(doer, image)
	}
}

// NotifyImageRecommend broadcasts an image recommend/unrecommend action to
// all registered notifiers.
func NotifyImageRecommend(optUser *models.User, image *models.Image, action string) {
	for _, notifier := range notifiers {
		notifier.NotifyImageRecommend(optUser, image, action)
	}
}

// NotifyChangeUserAvatar broadcasts to all registered notifiers that user
// updated their avatar.
func NotifyChangeUserAvatar(user *models.User, form auth.AvatarForm) {
	for _, notifier := range notifiers {
		notifier.NotifyChangeUserAvatar(user, form)
	}
}

// NotifyChangeCloudbrainStatus
func NotifyChangeCloudbrainStatus(cloudbrain *models.Cloudbrain, oldStatus string) {
for _, notifier := range notifiers {


+ 27
- 0
modules/notification/reward/point.go View File

@@ -0,0 +1,27 @@
package reward

import (
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/notification/base"
)

// pointNotifier listens for platform events that affect user point accounts.
type pointNotifier struct {
	base.NullNotifier
}

// Compile-time check that pointNotifier implements base.Notifier.
var (
	_ base.Notifier = &pointNotifier{}
)

// NewNotifier creates a new pointNotifier.
func NewNotifier() base.Notifier {
	return &pointNotifier{}
}

// NotifyChangeCloudbrainStatus forwards cloudbrain status transitions to the
// point-accounting pipeline via models.StatusChangeChan.
// Only transitions into a running or terminal state are forwarded.
func (*pointNotifier) NotifyChangeCloudbrainStatus(cloudbrain *models.Cloudbrain, oldStatus string) {
	log.Info("pointNotifier NotifyChangeCloudbrainStatus cloudbrain.id=%d cloudbrain.status=%s oldStatus=%s", cloudbrain.ID, cloudbrain.Status, oldStatus)
	if cloudbrain.IsRunning() || cloudbrain.IsTerminal() {
		// NOTE(review): this send blocks if StatusChangeChan is unbuffered or
		// full — confirm the consumer always drains the channel.
		models.StatusChangeChan <- cloudbrain
	}
}

+ 83
- 1
modules/redis/redis_client/client.go View File

@@ -76,7 +76,7 @@ func HEXISTS(conn redis.Conn, key string, subKey string) (bool, error) {

}

func Expire(conn redis.Conn, key string, seconds int) error {
func EXPIRE(conn redis.Conn, key string, seconds int) error {
_, err := conn.Do("EXPIRE", key, seconds)
return err

@@ -145,3 +145,85 @@ func TTL(key string) (int, error) {
return n, nil

}

// IncrBy atomically increments the integer stored at key by n and returns
// the resulting value.
func IncrBy(key string, n int64) (int64, error) {
	redisClient := labelmsg.Get()
	defer redisClient.Close()

	reply, err := redisClient.Do("INCRBY", key, n)
	if err != nil {
		return 0, err
	}
	// Bug fix: the ParseInt error was previously discarded (`return i, nil`),
	// so a malformed reply silently yielded 0.
	i, err := strconv.ParseInt(fmt.Sprint(reply), 10, 64)
	if err != nil {
		return 0, err
	}
	return i, nil
}

// Expire sets a time-to-live on key, rounded down to whole seconds.
func Expire(key string, expireTime time.Duration) error {
	redisClient := labelmsg.Get()
	defer redisClient.Close()

	seconds := int64(expireTime.Seconds())
	_, err := redisClient.Do("EXPIRE", key, seconds)
	return err
}

// GetInt64 reads the value stored at key via Get and parses it as an int64.
// It returns (exists, value, error); exists is false when the key is absent
// (empty string reply).
func GetInt64(key string) (bool, int64, error) {
	raw, err := Get(key)
	if err != nil {
		return false, 0, err
	}
	if raw == "" {
		return false, 0, nil
	}

	val, parseErr := strconv.ParseInt(raw, 10, 64)
	if parseErr != nil {
		return false, 0, parseErr
	}
	return true, val, nil
}

// ZAdd inserts value into the sorted set at key with the given score.
func ZAdd(key, value string, score float64) error {
	redisClient := labelmsg.Get()
	defer redisClient.Close()

	_, err := redisClient.Do("ZADD", key, score, value)
	return err
}

// ZRangeByScore returns the members of the sorted set at key whose scores
// lie within [min, max].
func ZRangeByScore(key string, min, max float64) ([]string, error) {
	redisClient := labelmsg.Get()
	defer redisClient.Close()

	reply, err := redisClient.Do("ZRANGEBYSCORE", key, min, max)
	if err != nil {
		return nil, err
	}
	if reply == nil {
		// Previously returned `nil, err` with err known to be nil; make the
		// intent explicit.
		return nil, nil
	}
	// Bug fix: the conversion error was previously discarded with `s, _ :=`,
	// hiding unexpected reply types from callers.
	s, err := redis.Strings(reply, nil)
	if err != nil {
		return nil, err
	}
	return s, nil
}

// ZRemRangeByScore removes every member of the sorted set at key whose
// score lies within [min, max].
func ZRemRangeByScore(key string, min, max float64) error {
	redisClient := labelmsg.Get()
	defer redisClient.Close()

	_, err := redisClient.Do("ZREMRANGEBYSCORE", key, min, max)
	return err
}

+ 17
- 0
modules/redis/redis_key/account_redis_key.go View File

@@ -0,0 +1,17 @@
package redis_key

import "fmt"

const ACCOUNT_REDIS_PREFIX = "account"

// PointAccountOperateLock builds the redis lock key guarding point
// operations on a single user's account.
func PointAccountOperateLock(userId int64) string {
	uid := fmt.Sprint(userId)
	return KeyJoin(ACCOUNT_REDIS_PREFIX, uid, "point", "operate", "lock")
}

// PointAccountInfo builds the redis key caching a user's point account info.
func PointAccountInfo(userId int64) string {
	uid := fmt.Sprint(userId)
	return KeyJoin(ACCOUNT_REDIS_PREFIX, uid, "info")
}

// PointAccountInitLock builds the redis lock key guarding first-time
// initialization of a user's point account.
func PointAccountInitLock(userId int64) string {
	uid := fmt.Sprint(userId)
	return KeyJoin(ACCOUNT_REDIS_PREFIX, uid, "init", "lock")
}

+ 7
- 0
modules/redis/redis_key/cloudbrain_redis_key.go View File

@@ -0,0 +1,7 @@
package redis_key

const CLOUDBRAIN_PREFIX = "cloudbrain"

func CloudbrainBindingJobNameKey(repoId string, jobType string, jobName string) string {
return KeyJoin(CLOUDBRAIN_PREFIX, repoId, jobType, jobName, "redis_key")
}

+ 2
- 0
modules/redis/redis_key/key_base.go View File

@@ -4,6 +4,8 @@ import "strings"

const KEY_SEPARATE = ":"

const EMPTY_REDIS_VAL = "Nil"

func KeyJoin(keys ...string) string {
var build strings.Builder
for _, v := range keys {


+ 26
- 0
modules/redis/redis_key/limit_redis_key.go View File

@@ -0,0 +1,26 @@
package redis_key

import (
"code.gitea.io/gitea/models"
"fmt"
)

const LIMIT_REDIS_PREFIX = "limit"

// LimitCount builds the redis counter key for a limit rule.
// The key is scoped per user unless the rule applies to all users, and is
// additionally scoped by the period's start/end unix timestamps when a
// period is supplied.
func LimitCount(userId int64, limitCode string, limitType string, scope string, period *models.PeriodResult) string {
	parts := []string{LIMIT_REDIS_PREFIX}
	if scope != models.LimitScopeAllUsers.Name() {
		parts = append(parts, "uid", fmt.Sprint(userId))
	}
	parts = append(parts, limitCode, limitType)
	if period != nil {
		parts = append(parts, fmt.Sprint(period.StartTime.Unix()), fmt.Sprint(period.EndTime.Unix()))
	}
	parts = append(parts, "count")
	return KeyJoin(parts...)
}

func LimitConfig(limitType string) string {
return KeyJoin(LIMIT_REDIS_PREFIX, limitType, "config")
}

+ 21
- 0
modules/redis/redis_key/reward_redis_key.go View File

@@ -0,0 +1,21 @@
package redis_key

import (
"code.gitea.io/gitea/modules/setting"
"fmt"
"strings"
)

const REWARD_REDIS_PREFIX = "reward"

func RewardOperateLock(requestId string, sourceType string, operateType string) string {
return KeyJoin(REWARD_REDIS_PREFIX, requestId, sourceType, operateType, "send")
}

func RewardOperateNotification() string {
return KeyJoin(REWARD_REDIS_PREFIX, "operate", strings.ReplaceAll(setting.AppURL, "/", ""), "notification")
}

// RewardTaskRunningLock builds the redis lock key preventing concurrent
// runs of the same periodic reward task.
func RewardTaskRunningLock(taskId int64) string {
	id := fmt.Sprint(taskId)
	return KeyJoin(REWARD_REDIS_PREFIX, "periodic_task", id, "lock")
}

+ 10
- 0
modules/redis/redis_key/serial_redis_key.go View File

@@ -0,0 +1,10 @@
package redis_key

import "time"

const SERIAL_REDIS_PREFIX = "serial"

// RewardSerialCounter builds the per-minute counter key used to generate
// reward operation serial numbers. "200601021504" is Go's reference layout
// for yyyyMMddHHmm, so the key rolls over every minute.
func RewardSerialCounter(now time.Time) string {
	minute := now.Format("200601021504")
	return KeyJoin(SERIAL_REDIS_PREFIX, "reward_operate", minute, "counter")
}

+ 14
- 0
modules/redis/redis_key/task_redis_key.go View File

@@ -0,0 +1,14 @@
package redis_key

const TASK_REDIS_PREFIX = "task"

func TaskAccomplishLock(sourceId string, taskType string) string {
return KeyJoin(TASK_REDIS_PREFIX, sourceId, taskType, "accomplish")
}

func TaskConfigList() string {
return KeyJoin(TASK_REDIS_PREFIX, "config", "list")
}
// TaskConfigOperateLock builds the redis lock key guarding task-config
// operations.
// NOTE(review): taskCode and rewardType are accepted but not used, so every
// config operation shares one global lock — confirm whether the key should
// include them.
func TaskConfigOperateLock(taskCode, rewardType string) string {
	return KeyJoin(TASK_REDIS_PREFIX, "config", "operate", "lock")
}

+ 16
- 9
modules/redis/redis_lock/lock.go View File

@@ -1,8 +1,9 @@
package redis_lock

import (
"code.gitea.io/gitea/modules/redis/redis_client"
"time"

"code.gitea.io/gitea/modules/redis/redis_client"
)

type DistributeLock struct {
@@ -13,26 +14,32 @@ func NewDistributeLock(lockKey string) *DistributeLock {
return &DistributeLock{lockKey: lockKey}
}

func (lock *DistributeLock) Lock(expireTime time.Duration) bool {
isOk, _ := redis_client.Setnx(lock.lockKey, "", expireTime)
return isOk
func (lock *DistributeLock) Lock(expireTime time.Duration) (bool, error) {
isOk, err := redis_client.Setnx(lock.lockKey, "", expireTime)
if err != nil {
return false, err
}
return isOk, nil
}

func (lock *DistributeLock) LockWithWait(expireTime time.Duration, waitTime time.Duration) bool {
func (lock *DistributeLock) LockWithWait(expireTime time.Duration, waitTime time.Duration) (bool, error) {
start := time.Now().Unix() * 1000
duration := waitTime.Milliseconds()
for {
isOk, _ := redis_client.Setnx(lock.lockKey, "", expireTime)
isOk, err := redis_client.Setnx(lock.lockKey, "", expireTime)
if err != nil {
return false, err
}
if isOk {
return true
return true, nil
}
if time.Now().Unix()*1000-start > duration {
return false
return false, nil
}
time.Sleep(50 * time.Millisecond)
}

return false
return false, nil
}

func (lock *DistributeLock) UnLock() error {


+ 45
- 24
modules/setting/setting.go View File

@@ -66,9 +66,10 @@ const (
)

type C2NetSequenceInfo struct {
ID int `json:"id"`
Name string `json:"name"`
Content string `json:"content"`
ID int `json:"id"`
Name string `json:"name"`
Content string `json:"content"`
ContentEN string `json:"content_en"`
}

type C2NetSqInfos struct {
@@ -214,10 +215,11 @@ var (
UseServiceWorker bool

Notification struct {
MinTimeout time.Duration
TimeoutStep time.Duration
MaxTimeout time.Duration
EventSourceUpdateTime time.Duration
MinTimeout time.Duration
TimeoutStep time.Duration
MaxTimeout time.Duration
EventSourceUpdateTime time.Duration
RewardNotifyUpdateTime time.Duration
} `ini:"ui.notification"`

Admin struct {
@@ -251,15 +253,17 @@ var (
Themes: []string{`gitea`, `arc-green`},
Reactions: []string{`+1`, `-1`, `laugh`, `hooray`, `confused`, `heart`, `rocket`, `eyes`},
Notification: struct {
MinTimeout time.Duration
TimeoutStep time.Duration
MaxTimeout time.Duration
EventSourceUpdateTime time.Duration
MinTimeout time.Duration
TimeoutStep time.Duration
MaxTimeout time.Duration
EventSourceUpdateTime time.Duration
RewardNotifyUpdateTime time.Duration
}{
MinTimeout: 10 * time.Second,
TimeoutStep: 10 * time.Second,
MaxTimeout: 60 * time.Second,
EventSourceUpdateTime: 10 * time.Second,
MinTimeout: 10 * time.Second,
TimeoutStep: 10 * time.Second,
MaxTimeout: 60 * time.Second,
EventSourceUpdateTime: 10 * time.Second,
RewardNotifyUpdateTime: 2 * time.Second,
},
Admin: struct {
UserPagingNum int
@@ -583,12 +587,13 @@ var (

//grampus config
Grampus = struct {
Env string
Host string
UserName string
Password string
SpecialPools string
C2NetSequence string
Env string
Host string
UserName string
Password string
SpecialPools string
C2NetSequence string
SyncScriptProject string
}{}

C2NetInfos *C2NetSqInfos
@@ -610,6 +615,13 @@ var (
WechatQRCodeExpireSeconds int
WechatAuthSwitch bool

//point config
CloudBrainPaySwitch bool
CloudBrainPayDelay time.Duration
CloudBrainPayInterval time.Duration
DeductTaskRange time.Duration
DeductTaskRangeForFirst time.Duration

//wechat auto reply config
UserNameOfWechatReply string
RepoNameOfWechatReply string
@@ -1464,7 +1476,7 @@ func NewContext() {
FlavorInfos = sec.Key("FLAVOR_INFOS").MustString("")
TrainJobFLAVORINFOS = sec.Key("TrainJob_FLAVOR_INFOS").MustString("")
ModelArtsSpecialPools = sec.Key("SPECIAL_POOL").MustString("")
ModelArtsMultiNode=sec.Key("MULTI_NODE").MustString("")
ModelArtsMultiNode = sec.Key("MULTI_NODE").MustString("")

sec = Cfg.Section("elk")
ElkUrl = sec.Key("ELKURL").MustString("")
@@ -1481,12 +1493,13 @@ func NewContext() {
WechatAppId = sec.Key("APP_ID").MustString("wxba77b915a305a57d")
WechatAppSecret = sec.Key("APP_SECRET").MustString("")
WechatQRCodeExpireSeconds = sec.Key("QR_CODE_EXPIRE_SECONDS").MustInt(120)
WechatAuthSwitch = sec.Key("AUTH_SWITCH").MustBool(true)
WechatAuthSwitch = sec.Key("AUTH_SWITCH").MustBool(false)
UserNameOfWechatReply = sec.Key("AUTO_REPLY_USER_NAME").MustString("OpenIOSSG")
RepoNameOfWechatReply = sec.Key("AUTO_REPLY_REPO_NAME").MustString("promote")
RefNameOfWechatReply = sec.Key("AUTO_REPLY_REF_NAME").MustString("master")
TreePathOfAutoMsgReply = sec.Key("AUTO_REPLY_TREE_PATH").MustString("wechat/auto_reply.json")
TreePathOfSubscribe = sec.Key("SUBSCRIBE_TREE_PATH").MustString("wechat/subscribe_reply.json")
WechatAuthSwitch = sec.Key("AUTH_SWITCH").MustBool(false)
CloudbrainStartedTemplateId = sec.Key("CLOUDBRAIN_STARTED_TEMPLATE_ID").MustString("")
CloudbrainStartedNotifyList = strings.Split(sec.Key("CLOUDBRAIN_STARTED_NOTIFY_LIST").MustString("DEBUG"), ",")
CloudbrainStartedTitle = sec.Key("CLOUDBRAIN_STARTED_TITLE").MustString("您好,您提交的算力资源申请已通过,任务已启动,请您关注运行情况。")
@@ -1496,6 +1509,12 @@ func NewContext() {
CloudbrainStoppedTitle = sec.Key("CLOUDBRAIN_STOPPED_TITLE").MustString("您好,您申请的算力资源已结束使用,任务已完成运行,状态为%s,请您关注运行结果")
CloudbrainStoppedRemark = sec.Key("CLOUDBRAIN_STOPPED_REMARK").MustString("感谢您的耐心等待。")

sec = Cfg.Section("point")
CloudBrainPaySwitch = sec.Key("CLOUDBRAIN_PAY_SWITCH").MustBool(false)
CloudBrainPayDelay = sec.Key("CLOUDBRAIN_PAY_DELAY").MustDuration(30 * time.Minute)
CloudBrainPayInterval = sec.Key("CLOUDBRAIN_PAY_INTERVAL").MustDuration(60 * time.Minute)
DeductTaskRange = sec.Key("DEDUCT_TASK_RANGE").MustDuration(30 * time.Minute)
DeductTaskRangeForFirst = sec.Key("DEDUCT_TASK_RANGE_FOR_FIRST").MustDuration(3 * time.Hour)
SetRadarMapConfig()

sec = Cfg.Section("warn_mail")
@@ -1552,12 +1571,14 @@ func getGrampusConfig() {
Grampus.UserName = sec.Key("USERNAME").MustString("")
Grampus.Password = sec.Key("PASSWORD").MustString("")
Grampus.SpecialPools = sec.Key("SPECIAL_POOL").MustString("")
Grampus.C2NetSequence = sec.Key("C2NET_SEQUENCE").MustString("{\"sequence\":[{\"id\":1,\"name\":\"cloudbrain_one\",\"content\":\"鹏城云脑一号\"},{\"id\":2,\"name\":\"cloudbrain_two\",\"content\":\"鹏城云脑二号\"},{\"id\":3,\"name\":\"beida\",\"content\":\"北大人工智能集群系统\"},{\"id\":4,\"name\":\"hefei\",\"content\":\"合肥类脑智能开放平台\"},{\"id\":5,\"name\":\"wuhan\",\"content\":\"武汉人工智能计算中心\"},{\"id\":6,\"name\":\"xian\",\"content\":\"西安未来人工智能计算中心\"},{\"id\":7,\"pclcci\":\"more\",\"content\":\"鹏城云计算所\"},{\"id\":8,\"name\":\"xuchang\",\"content\":\"中原人工智能计算中心\"},{\"id\":9,\"name\":\"chengdu\",\"content\":\"成都人工智能计算中心\"},{\"id\":10,\"name\":\"more\",\"content\":\"横琴先进智能计算中心\"},{\"id\":11,\"name\":\"more\",\"content\":\"国家超级计算济南中心\"}]}")
Grampus.C2NetSequence = sec.Key("C2NET_SEQUENCE").MustString("{\"sequence\":[{\"id\":1,\"name\":\"cloudbrain_one\",\"content\":\"鹏城云脑一号\",\"content_en\":\"Pencheng Cloudbrain Ⅰ\"},{\"id\":2,\"name\":\"cloudbrain_two\",\"content\":\"鹏城云脑二号\",\"content_en\":\"Pencheng Cloudbrain Ⅱ\"},{\"id\":3,\"name\":\"beida\",\"content\":\"北大人工智能集群系统\",\"content_en\":\"Peking University AI Center\"},{\"id\":4,\"name\":\"hefei\",\"content\":\"合肥类脑智能开放平台\",\"content_en\":\"Hefei AI Center\"},{\"id\":5,\"name\":\"wuhan\",\"content\":\"武汉人工智能计算中心\",\"content_en\":\"Wuhan AI Center\"},{\"id\":6,\"name\":\"xian\",\"content\":\"西安未来人工智能计算中心\",\"content_en\":\"Xi'an AI Center\"},{\"id\":7,\"pclcci\":\"more\",\"content\":\"鹏城云计算所\",\"content_en\":\"Pengcheng Cloud Computing Institute\"},{\"id\":8,\"name\":\"xuchang\",\"content\":\"中原人工智能计算中心\",\"content_en\":\"Zhongyuan AI Center\"},{\"id\":9,\"name\":\"chengdu\",\"content\":\"成都人工智能计算中心\",\"content_en\":\"Chengdu AI Center\"},{\"id\":10,\"name\":\"more\",\"content\":\"横琴先进智能计算中心\",\"content_en\":\"Hengqin AI Center\"},{\"id\":11,\"name\":\"more\",\"content\":\"国家超级计算济南中心\",\"content_en\":\"HPC & AI Center\"}]}")
if Grampus.C2NetSequence != "" {
if err := json.Unmarshal([]byte(Grampus.C2NetSequence), &C2NetInfos); err != nil {
log.Error("Unmarshal(C2NetSequence) failed:%v", err)
}
}
Grampus.SyncScriptProject = sec.Key("SYNC_SCRIPT_PROJECT").MustString("script_for_grampus")

}

func SetRadarMapConfig() {


+ 1
- 1
modules/templates/helper.go View File

@@ -791,7 +791,7 @@ func GetRefName(ref string) string {
return reg.ReplaceAllString(ref, "")
}

func MB2GB(size int64) string {
func MB2GB(size int) string {
s := strconv.FormatFloat(float64(size)/float64(1024), 'f', 2, 64)
for strings.HasSuffix(s, "0") {
s = strings.TrimSuffix(s, "0")


+ 10
- 0
modules/util/uuid_util.go View File

@@ -0,0 +1,10 @@
package util

import (
gouuid "github.com/satori/go.uuid"
"strings"
)

// UUID returns a random (version 4) UUID rendered as a 32-character hex
// string, with the dashes removed.
func UUID() string {
	return strings.ReplaceAll(gouuid.NewV4().String(), "-", "")
}

+ 32
- 5
options/locale/locale_en-US.ini View File

@@ -23,6 +23,7 @@ signed_in_as = Signed in as
enable_javascript = This website works better with JavaScript.
toc = Table of Contents
return=Back OpenI
calculation_points = Calculation Points

username = Username
email = Email Address
@@ -69,6 +70,10 @@ your_dashboard = Dashboard
your_profile = Profile
your_starred = Starred
your_settings = Settings
invite_friends = Invite Friends
your_friend=Your friend
invite_you_to_join_the_OpenI_AI_Collaboration_Platform_and_enjoy_abundant_free_computing_resources=invite you to join the OpenI AI Collaboration Platform and enjoy abundant free computing resources!
recommender=Recommender

all = All
sources = Sources
@@ -531,6 +536,10 @@ form.name_reserved = The username '%s' is reserved.
form.name_pattern_not_allowed = The pattern '%s' is not allowed in a username.
form.name_chars_not_allowed = User name '%s' contains invalid characters.

static.invitationdetailsheetname=User Invitation Detail
static.invitationNum=User Invitation Count
static.invitationsheetname=User Invitation
static.srcUserId=Recommended User ID
static.sheetname=User Analysis
static.id=ID
static.name=User Name
@@ -1055,7 +1064,7 @@ image_delete_fail=Failed to delete image, please try again later.
image_overwrite=You had submitted the same name image before, are you sure to overwrite the original image?
download=Download
score=Score
wait_count_start = There are currently
wait_count_start = There are currently
wait_count_end = tasks queued
file_limit_100 = Display up to 100 files or folders in a single directory
images.name = Image Tag
@@ -1092,6 +1101,7 @@ cloudbrain_operate = Operate
cloudbrain_status_createtime = Status/Createtime
cloudbrain_status_runtime = Running Time
cloudbrain_jobname_err=Name must start with a lowercase letter or number,can include lowercase letter,number,_ and -,can not end with _, and can be up to 36 characters long.
cloudbrain_samejob_err=A task with the same name has been created, the system is processing it, please wait a minute.
cloudbrain_bootfile_err=The bootfile does not exist in the repository
cloudbrain_query_fail=Failed to query cloudbrain information.
cloudbrain.mirror_tag = Mirror Tag
@@ -1266,7 +1276,7 @@ model.manage.modellabel=Model label
model.manage.modeldesc=Model description
model.manage.baseinfo=Base Information
modelconvert.notcreate=No model conversion task has been created.
modelconvert.importfirst1=Please import the
modelconvert.importfirst1=Please import the
modelconvert.importfirst2=model
modelconvert.importfirst3=first, then converts it.
modelconvert.download=Download
@@ -1291,6 +1301,7 @@ modelconvert.taskurlname=Model transformation task
log_scroll_start=Scroll to top
log_scroll_end=Scroll to bottom
modelconvert.tasknameempty=Please enter a task name.
modelconvert.modelfileempty=Please choose a model file.
modelconvert.inputshapeerror=Format input error, please input such as: 1,1,32,32, corresponding to the input data format.

modelconvert.manage.create_error1=A model transformation task with the same name already exists.
@@ -3072,6 +3083,11 @@ task_createmodel=`created new model <a href="%s/modelmanage/show_model_info?name
task_gputrainjob=`created CPU/GPU training task <a href="%s/cloudbrain/train-job/%s">%s</a>`
task_c2netnputrainjob=`created NPU training task <a href="%s/grampus/train-job/%s">%s</a>`
task_c2netgputrainjob=`created CPU/GPU training task <a href="%s/grampus/train-job/%s">%s</a>`
binded_wechat=binded WeChat
dataset_recommended=`created dataset <a href="%s/datasets">%s</a> was set as recommended dataset`
create_image=`committed image <span style="font-weight:bold;">%s</span>`
image_recommended=`committed image <span style="font-weight:bold;">%s</span> was set as recommended image`
update_user_avatar=updated avatar

[tool]
ago = %s ago
@@ -3191,7 +3207,7 @@ wrong_specification=You cannot use this specification, please choose another ite
resource_use=Resource Occupancy

job_name_rule = Please enter letters, numbers, _ and - up to 64 characters and cannot end with a dash (-).
train_dataset_path_rule = The dataset location is stored in the environment variable <strong style="color:#010101">data_url</strong>, and the output path is stored in the environment variable <strong style="color:#010101">train_url</strong>.
train_dataset_path_rule = The dataset location is stored in the environment variable <strong style="color:#010101">data_url</strong>, the pre-trained model is stored in the environment variable <strong style="color:#010101">ckpt_url</strong>, and the output path is stored in the environment variable <strong style="color:#010101">train_url</strong>.
infer_dataset_path_rule = The dataset location is stored in the environment variable <strong style="color:#010101">data_url</strong>, and the output path is stored in the environment variable <strong style="color:#010101">result_url</strong>.
view_sample = View sample
inference_output_path_rule = The inference output path is stored in the environment variable result_url.
@@ -3228,5 +3244,16 @@ Stopped_success_update_status_fail=Succeed in stopping th job, but failed to upd
load_code_failed=Fail to load code, please check if the right branch is selected.

error.dataset_select = dataset select error:the count exceed the limit or has same name
new_train_gpu_tooltips = The code is storaged in <strong style="color:#010101">%s</strong>, the dataset is storaged in <strong style="color:#010101">%s</strong>, and please put your model into <strong style="color:#010101">%s</strong> then you can download it online
new_infer_gpu_tooltips = The dataset is stored in <strong style="color:#010101">%s</strong>, the model file is stored in <strong style="color:#010101">%s</strong>, please store the inference output in <strong style="color:#010101">%s</strong> for subsequent downloads.
new_train_gpu_tooltips = The code is stored in <strong style="color:#010101">%s</strong>, the dataset is stored in <strong style="color:#010101">%s</strong>, the pre-trained model is stored in the environment variable <strong style="color:#010101">%s</strong>, and please put your model into <strong style="color:#010101">%s</strong> then you can download it online
new_train_npu_tooltips = The code is stored in <strong style="color:#010101">%s</strong>, the pre-trained model is stored in the environment variable <strong style="color:#010101">%s</strong>, and please put your model into <strong style="color:#010101">%s</strong> then you can download it online
new_infer_gpu_tooltips = The dataset is stored in <strong style="color:#010101">%s</strong>, the model file is stored in <strong style="color:#010101">%s</strong>, please store the inference output in <strong style="color:#010101">%s</strong> for subsequent downloads.

[points]
points = points
free = Free
points_hour = Points/hour
balance_of_points = Balance of Points:
hours = Hours
expected_time = , expected to be available for
points_acquisition_instructions = Points Acquisition Instructions
insufficient_points_balance = Insufficient points balance

+ 32
- 3
options/locale/locale_zh-CN.ini View File

@@ -23,6 +23,7 @@ signed_in_as=已登录用户
enable_javascript=使用 JavaScript能使本网站更好的工作。
toc=目录
return=返回OpenI
calculation_points=算力积分

username=用户名
email=电子邮件地址
@@ -69,6 +70,10 @@ your_dashboard=个人中心
your_profile=个人信息
your_starred=已点赞
your_settings=设置
invite_friends=邀请好友
your_friend=您的好友
invite_you_to_join_the_OpenI_AI_Collaboration_Platform_and_enjoy_abundant_free_computing_resources=邀请您加入启智社区AI协作平台,畅享充沛的免费算力资源!
recommender=推荐人

all=所有
sources=自建
@@ -536,7 +541,11 @@ form.name_reserved='%s' 用户名被保留。
form.name_pattern_not_allowed=用户名中不允许使用 "%s"。
form.name_chars_not_allowed=用户名 '%s' 包含无效字符。

static.invitationdetailsheetname=用户邀请详细数据
static.invitationNum=邀请用户数
static.sheetname=用户分析
static.srcUserId=推荐用户ID
static.invitationsheetname=用户邀请分析
static.id=ID
static.name=用户名
static.codemergecount=PR数
@@ -1096,6 +1105,7 @@ cloudbrain_operate=操作
cloudbrain_status_createtime=状态/创建时间
cloudbrain_status_runtime = 运行时长
cloudbrain_jobname_err=只能以小写字母或数字开头且只包含小写字母、数字、_和-,不能以_结尾,最长36个字符。
cloudbrain_samejob_err=同名任务已经被创建,系统处理中,请您稍候。
cloudbrain_bootfile_err=仓库中不存在启动文件
cloudbrain_query_fail=查询云脑任务失败。
cloudbrain.mirror_tag = 镜像标签
@@ -1307,6 +1317,7 @@ log_scroll_start=滚动到顶部
log_scroll_end=滚动到底部
modelconvert.tasknameempty=请输入任务名称。
modelconvert.inputshapeerror=格式输入错误,请输入如:1,1,32,32,与输入数据格式对应。
modelconvert.modelfileempty=请选择模型文件。

modelconvert.manage.create_error1=相同的名称模型转换任务已经存在。
modelconvert.manage.create_error2=只能创建一个正在运行的模型转换任务。
@@ -3089,6 +3100,11 @@ task_createmodel=`导入了新模型 <a href="%s/modelmanage/show_model_info?nam
task_gputrainjob=`创建了CPU/GPU类型训练任务 <a href="%s/cloudbrain/train-job/%s">%s</a>`
task_c2netnputrainjob=`创建了NPU类型训练任务 <a href="%s/grampus/train-job/%s">%s</a>`
task_c2netgputrainjob=`创建了CPU/GPU类型训练任务 <a href="%s/grampus/train-job/%s">%s</a>`
binded_wechat=绑定微信
dataset_recommended=`创建的数据集 <a href="%s/datasets">%s</a> 被设置为推荐数据集`
create_image=`提交了镜像 <span style="font-weight:bold;">%s</span>`
image_recommended=`提交的镜像 <span style="font-weight:bold;">%s</span> 被设置为推荐镜像`
update_user_avatar=更新了头像

[tool]
ago=%s前
@@ -3209,7 +3225,7 @@ card_type = 卡类型
wrong_specification=您目前不能使用这个资源规格,请选择其他资源规格。

job_name_rule = 请输入字母、数字、_和-,最长64个字符,且不能以中划线(-)结尾。
train_dataset_path_rule = 数据集位置存储在环境变量<strong style="color:#010101">data_url</strong>中,训练输出路径存储在环境变量<strong style="color:#010101">train_url</strong>中。
train_dataset_path_rule = 数据集位置存储在环境变量<strong style="color:#010101">data_url</strong>中,预训练模型存放在环境变量<strong style="color:#010101">ckpt_url</strong>中,训练输出路径存储在环境变量<strong style="color:#010101">train_url</strong>中。
infer_dataset_path_rule = 数据集位置存储在环境变量<strong style="color:#010101">data_url</strong>中,推理输出路径存储在环境变量<strong style="color:#010101">result_url</strong>中。
view_sample = 查看样例
inference_output_path_rule = 推理输出路径存储在环境变量result_url中。
@@ -3247,5 +3263,18 @@ load_code_failed=代码加载失败,请确认选择了正确的分支。


error.dataset_select = 数据集选择错误:数量超过限制或者有同名数据集
new_train_gpu_tooltips =训练脚本存储在<strong style="color:#010101">%s</strong>中,数据集存储在<strong style="color:#010101">%s</strong>中,训练输出请存储在<strong style="color:#010101">%s</strong>中以供后续下载。
new_infer_gpu_tooltips = 数据集存储在<strong style="color:#010101">%s</strong>中,模型文件存储在<strong style="color:#010101">%s</strong>中,推理输出请存储在<strong style="color:#010101">%s</strong>中以供后续下载。
new_train_gpu_tooltips =训练脚本存储在<strong style="color:#010101">%s</strong>中,数据集存储在<strong style="color:#010101">%s</strong>中,预训练模型存放在环境变量<strong style="color:#010101">%s</strong>中,训练输出请存储在<strong style="color:#010101">%s</strong>中以供后续下载。
new_train_npu_tooltips =训练脚本存储在<strong style="color:#010101">%s</strong>中,预训练模型存放在环境变量<strong style="color:#010101">%s</strong>中,训练输出请存储在<strong style="color:#010101">%s</strong>中以供后续下载。
new_infer_gpu_tooltips = 数据集存储在<strong style="color:#010101">%s</strong>中,模型文件存储在<strong style="color:#010101">%s</strong>中,推理输出请存储在<strong style="color:#010101">%s</strong>中以供后续下载。

[points]
points = 积分
free = 免费
points_hour = 积分/每小时
balance_of_points = 积分余额:
hours = 小时
expected_time = ,预计可用
points_acquisition_instructions = 积分获取说明
insufficient_points_balance = 积分余额不足



+ 1
- 1
package.json View File

@@ -80,4 +80,4 @@
"browserslist": [
"defaults"
]
}
}

+ 13
- 2
public/home/home.js View File

@@ -163,6 +163,11 @@ document.onreadystatechange = function () {
html += recordPrefix + actionName;
html += " <a href=\"" + getTaskLink(record) + "\" rel=\"nofollow\">" + record.RefName + "</a>"
}
else if(record.OpType == "35"){
var datasetLink = "<a href=\"" + getRepoLink(record) + "/datasets" + "\" rel=\"nofollow\">" + record.Content.split('|')[1] + "</a>";
actionName = actionName.replace('{dataset}', datasetLink);
html += recordPrefix + actionName;
}
else{
continue;
}
@@ -354,7 +359,10 @@ var actionNameZH={
"30":"导入了新模型",
"31":"创建了CPU/GPU类型训练任务",
"32":"创建了NPU类型训练任务",
"33":"创建了CPU/GPU类型训练任务"
"33":"创建了CPU/GPU类型训练任务",
"35":"创建的数据集 {dataset} 被设置为推荐数据集",
"36":"提交了镜像 {image}",
"37":"提交的镜像 {image} 被设置为推荐镜像",
};

var actionNameEN={
@@ -382,7 +390,10 @@ var actionNameEN={
"30":" created new model",
"31":" created CPU/GPU type training task",
"32":" created NPU type training task",
"33":" created CPU/GPU type training task"
"33":" created CPU/GPU type training task",
"35":" created dataset {dataset} was set as recommended dataset",
"36":"committed image {image}",
"37":"committed image {image} was set as recommended image",
};

var repoAndOrgZH={


+ 3
- 0
routers/admin/dataset.go View File

@@ -1,6 +1,7 @@
package admin

import (
"code.gitea.io/gitea/modules/notification"
"net/http"
"strconv"
"strings"
@@ -111,6 +112,8 @@ func DatasetAction(ctx *context.Context) {
if err != nil {
ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("repo.star_fail", ctx.Params(":action"))))
} else {
d, _ := models.GetDatasetByID(datasetId)
notification.NotifyDatasetRecommend(ctx.User, d, ctx.Params(":action"))
ctx.JSON(http.StatusOK, models.BaseOKMessage)
}
}


+ 14
- 0
routers/api/v1/api.go View File

@@ -572,6 +572,19 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Get("/query_user_all", operationReq, repo_ext.QueryUserStaticAll)
m.Get("/query_user_activity", operationReq, repo_ext.QueryUserActivity)
m.Get("/query_user_login", operationReq, repo_ext.QueryUserLoginInfo)

m.Get("/query_invitation_current_month", operationReq, repo_ext.QueryInvitationCurrentMonth)
m.Get("/query_invitation_current_week", operationReq, repo_ext.QueryInvitationCurrentWeek)
m.Get("/query_invitation_last_week", operationReq, repo_ext.QueryInvitationLastWeek)
m.Get("/query_invitation_current_year", operationReq, repo_ext.QueryInvitationCurrentYear)
m.Get("/query_invitation_last30_day", operationReq, repo_ext.QueryInvitationLast30Day)
m.Get("/query_invitation_last_month", operationReq, repo_ext.QueryInvitationLastMonth)
m.Get("/query_invitation_yesterday", operationReq, repo_ext.QueryInvitationYesterday)
m.Get("/query_invitation_all", operationReq, repo_ext.QueryInvitationAll)
m.Get("/query_invitation_userdefine", operationReq, repo_ext.QueryUserDefineInvitationPage)

m.Get("/download_invitation_detail", operationReq, repo_ext.DownloadInvitationDetail)

//cloudbrain board
m.Group("/cloudbrainboard", func() {
m.Get("/downloadAll", repo.DownloadCloudBrainBoard)
@@ -969,6 +982,7 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Get("", repo.GetModelArtsTrainJobVersion)
m.Post("/stop_version", cloudbrain.AdminOrOwnerOrJobCreaterRightForTrain, repo_ext.GrampusStopJob)
m.Get("/log", repo_ext.GrampusGetLog)
m.Get("/download_log", cloudbrain.AdminOrJobCreaterRightForTrain, repo_ext.GrampusDownloadLog)
})
})
}, reqRepoReader(models.UnitTypeCloudBrain))


+ 5
- 1
routers/api/v1/repo/cloudbrain.go View File

@@ -379,7 +379,11 @@ func CloudbrainDownloadLogFile(ctx *context.Context) {
return
}

prefix := "/" + setting.CBCodePathPrefix + job.JobName + "/model"
logDir := "/model"
if job.JobType == string(models.JobTypeInference) {
logDir = cloudbrain.ResultPath
}
prefix := "/" + setting.CBCodePathPrefix + job.JobName + logDir
files, err := storage.GetOneLevelAllObjectUnderDirMinio(setting.Attachment.Minio.Bucket, prefix, "")
if err != nil {
log.Error("query cloudbrain model failed: %v", err)


+ 2
- 0
routers/api/v1/repo/cloudbrain_dashboard.go View File

@@ -733,6 +733,7 @@ func GetCloudbrainsDetailData(ctx *context.Context) {
ctx.ServerError("Get job failed:", err)
return
}
models.LoadSpecs4CloudbrainInfo(ciTasks)
nilTime := time.Time{}
tasks := []models.TaskDetail{}
for i, task := range ciTasks {
@@ -769,6 +770,7 @@ func GetCloudbrainsDetailData(ctx *context.Context) {
} else {
taskDetail.IsDelete = false
}
taskDetail.Spec = ciTasks[i].Spec
tasks = append(tasks, taskDetail)
}



+ 1
- 0
routers/authentication/wechat.go View File

@@ -31,6 +31,7 @@ func GetQRCode4Bind(ctx *context.Context) {

r, err := createQRCode4Bind(userId)
if err != nil {
log.Error("GetQRCode4Bind failed,error=%v", err)
ctx.JSON(200, map[string]interface{}{
"code": "9999",
"msg": "Get QR code failed",


+ 1
- 1
routers/authentication/wechat_event.go View File

@@ -1,9 +1,9 @@
package authentication

import (
"code.gitea.io/gitea/modules/auth/wechat"
"code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/log"
wechat "code.gitea.io/gitea/services/wechat"
"encoding/xml"
"io/ioutil"
"time"


+ 15
- 0
routers/home.go View File

@@ -106,6 +106,11 @@ func Dashboard(ctx *context.Context) {
log.Info("set image info=" + pictureInfo[0]["url"])
ctx.Data["image_url"] = pictureInfo[0]["url"]
ctx.Data["image_link"] = pictureInfo[0]["image_link"]

if len(pictureInfo) > 1 {
ctx.Data["invite_image_url"] = pictureInfo[1]["url"]
ctx.Data["invite_image_link"] = pictureInfo[1]["image_link"]
}
}
if !ctx.User.IsActive && setting.Service.RegisterEmailConfirm {
ctx.Data["Title"] = ctx.Tr("auth.active_your_account")
@@ -728,6 +733,16 @@ func getImageInfo(filename string) ([]map[string]string, error) {
return imageInfo, nil
}

// GetMapInfo serves the JSON content of a recommendation file hosted under
// setting.RecommentRepoAddr, selected by the "filename" query parameter.
// The endpoint is best-effort: on fetch failure the error is logged and
// whatever result the helper returned (possibly nil) is still written out
// with HTTP 200, matching the behavior of the sibling recommend endpoints.
func GetMapInfo(ctx *context.Context) {
	filename := ctx.Query("filename")
	url := setting.RecommentRepoAddr + filename
	result, err := repository.RecommendContentFromPromote(url)
	if err != nil {
		// Log at Error level so fetch failures are not lost among Info noise.
		log.Error("get file error:" + err.Error())
	}
	ctx.JSON(http.StatusOK, result)
}

func GetRankUser(index string) ([]map[string]interface{}, error) {
url := setting.RecommentRepoAddr + "user_rank_" + index
result, err := repository.RecommendFromPromote(url)


+ 5
- 0
routers/image/image.go View File

@@ -1,6 +1,7 @@
package image

import (
"code.gitea.io/gitea/modules/notification"
"net/http"
"strconv"

@@ -25,6 +26,10 @@ func Action(ctx *context.Context) {
if err != nil {
ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("repo.star_fail", ctx.Params(":action"))))
} else {
image, err := models.GetImageByID(imageId)
if err == nil {
notification.NotifyImageRecommend(ctx.User, image, ctx.Params(":action"))
}
ctx.JSON(http.StatusOK, models.BaseOKMessage)
}
}

+ 49
- 24
routers/repo/ai_model_manage.go View File

@@ -27,6 +27,9 @@ const (
MODEL_LATEST = 1
MODEL_NOT_LATEST = 0
MODEL_MAX_SIZE = 1024 * 1024 * 1024
STATUS_COPY_MODEL = 1
STATUS_FINISHED = 0
STATUS_ERROR = 2
)

func saveModelByParameters(jobId string, versionName string, name string, version string, label string, description string, engine int, ctx *context.Context) error {
@@ -62,13 +65,9 @@ func saveModelByParameters(jobId string, versionName string, name string, versio
modelSelectedFile := ctx.Query("modelSelectedFile")
//download model zip //train type
if aiTask.ComputeResource == models.NPUResource {
modelPath, modelSize, err = downloadModelFromCloudBrainTwo(id, aiTask.JobName, "", aiTask.TrainUrl, modelSelectedFile)
if err != nil {
log.Info("download model from CloudBrainTwo faild." + err.Error())
return err
}
cloudType = models.TypeCloudBrainTwo
} else if aiTask.ComputeResource == models.GPUResource {
cloudType = models.TypeCloudBrainOne
var ResourceSpecs *models.ResourceSpecs
json.Unmarshal([]byte(setting.ResourceSpecs), &ResourceSpecs)
for _, tmp := range ResourceSpecs.ResourceSpec {
@@ -77,24 +76,8 @@ func saveModelByParameters(jobId string, versionName string, name string, versio
aiTask.FlavorName = flaverName
}
}
modelPath, modelSize, err = downloadModelFromCloudBrainOne(id, aiTask.JobName, "", aiTask.TrainUrl, modelSelectedFile)
if err != nil {
log.Info("download model from CloudBrainOne faild." + err.Error())
return err
}
cloudType = models.TypeCloudBrainOne
}
// else if cloudType == models.TypeC2Net {
// if aiTask.ComputeResource == models.NPUResource {
// modelPath, modelSize, err = downloadModelFromCloudBrainTwo(id, aiTask.JobName, "", aiTask.TrainUrl, modelSelectedFile)
// if err != nil {
// log.Info("download model from CloudBrainTwo faild." + err.Error())
// return err
// }
// } else if aiTask.ComputeResource == models.GPUResource {

// }
// }

accuracy := make(map[string]string)
accuracy["F1"] = ""
accuracy["Recall"] = ""
@@ -123,6 +106,7 @@ func saveModelByParameters(jobId string, versionName string, name string, versio
Engine: int64(engine),
TrainTaskInfo: string(aiTaskJson),
Accuracy: string(accuracyJson),
Status: STATUS_COPY_MODEL,
}

err = models.SaveModelToDb(model)
@@ -146,11 +130,44 @@ func saveModelByParameters(jobId string, versionName string, name string, versio

models.UpdateRepositoryUnits(ctx.Repo.Repository, units, deleteUnitTypes)

go asyncToCopyModel(aiTask, id, modelSelectedFile)

log.Info("save model end.")
notification.NotifyOtherTask(ctx.User, ctx.Repo.Repository, id, name, models.ActionCreateNewModelTask)
return nil
}

// asyncToCopyModel copies the trained model produced by aiTask into model
// storage and records the outcome on the model row identified by id.
// It is launched as a goroutine from saveModelByParameters; progress is
// reported solely through updateStatus (STATUS_FINISHED on success,
// STATUS_ERROR plus the error text on failure). Tasks whose ComputeResource
// is neither NPU nor GPU are silently ignored, matching the create path.
func asyncToCopyModel(aiTask *models.Cloudbrain, id string, modelSelectedFile string) {
	switch aiTask.ComputeResource {
	case models.NPUResource:
		// NPU training results are stored on the CloudBrain Two side.
		modelPath, modelSize, err := downloadModelFromCloudBrainTwo(id, aiTask.JobName, "", aiTask.TrainUrl, modelSelectedFile)
		if err != nil {
			updateStatus(id, 0, STATUS_ERROR, modelPath, err.Error())
			log.Error("download model from CloudBrainTwo failed." + err.Error())
			return
		}
		updateStatus(id, modelSize, STATUS_FINISHED, modelPath, "")
	case models.GPUResource:
		// GPU training results are stored on the CloudBrain One (Minio) side.
		modelPath, modelSize, err := downloadModelFromCloudBrainOne(id, aiTask.JobName, "", aiTask.TrainUrl, modelSelectedFile)
		if err != nil {
			updateStatus(id, 0, STATUS_ERROR, modelPath, err.Error())
			log.Error("download model from CloudBrainOne failed." + err.Error())
			return
		}
		updateStatus(id, modelSize, STATUS_FINISHED, modelPath, "")
	}
}

// updateStatus persists the copy status of a model record.
// statusDesc is truncated to 400 bytes to fit the backing column; note the
// byte-based cut may split a multi-byte rune — presumably acceptable for a
// diagnostic text field (TODO confirm column semantics). A persistence
// failure is only logged: this runs on the async copy path, so there is no
// caller to propagate the error to.
func updateStatus(id string, modelSize int64, status int, modelPath string, statusDesc string) {
	if len(statusDesc) > 400 {
		statusDesc = statusDesc[0:400]
	}
	// Log at Error level: a lost status update leaves the model stuck in
	// STATUS_COPY_MODEL from the user's point of view.
	if err := models.ModifyModelStatus(id, modelSize, status, modelPath, statusDesc); err != nil {
		log.Error("update status error." + err.Error())
	}
}

func SaveNewNameModel(ctx *context.Context) {
if !ctx.Repo.CanWrite(models.UnitTypeModelManage) {
ctx.Error(403, ctx.Tr("repo.model_noright"))
@@ -331,6 +348,7 @@ func QueryModelByParameters(repoId int64, page int) ([]*models.AiModelManage, in
RepoID: repoId,
Type: -1,
New: MODEL_LATEST,
Status: -1,
})
}

@@ -642,7 +660,6 @@ func queryUserName(intSlice []int64) map[int64]*models.User {
result[user.ID] = user
}
}

return result
}

@@ -685,6 +702,7 @@ func SetModelCount(ctx *context.Context) {
RepoID: repoId,
Type: Type,
New: MODEL_LATEST,
Status: -1,
})
ctx.Data["MODEL_COUNT"] = count
}
@@ -758,6 +776,7 @@ func ShowModelPageInfo(ctx *context.Context) {
RepoID: repoId,
Type: Type,
New: MODEL_LATEST,
Status: -1,
})
if err != nil {
ctx.ServerError("Cloudbrain", err)
@@ -835,6 +854,7 @@ func QueryModelListForPredict(ctx *context.Context) {
RepoID: repoId,
Type: ctx.QueryInt("type"),
New: -1,
Status: 0,
})
if err != nil {
ctx.ServerError("Cloudbrain", err)
@@ -896,12 +916,17 @@ func QueryOneLevelModelFile(ctx *context.Context) {
log.Info("TypeCloudBrainTwo list model file.")
prefix := model.Path[len(setting.Bucket)+1:]
fileinfos, _ := storage.GetOneLevelAllObjectUnderDir(setting.Bucket, prefix, parentDir)
if fileinfos == nil {
fileinfos = make([]storage.FileInfo, 0)
}
ctx.JSON(http.StatusOK, fileinfos)
} else if model.Type == models.TypeCloudBrainOne {
log.Info("TypeCloudBrainOne list model file.")
prefix := model.Path[len(setting.Attachment.Minio.Bucket)+1:]
fileinfos, _ := storage.GetOneLevelAllObjectUnderDirMinio(setting.Attachment.Minio.Bucket, prefix, parentDir)
if fileinfos == nil {
fileinfos = make([]storage.FileInfo, 0)
}
ctx.JSON(http.StatusOK, fileinfos)
}

}

+ 153
- 11
routers/repo/cloudbrain.go View File

@@ -2,7 +2,6 @@ package repo

import (
"bufio"
"code.gitea.io/gitea/services/cloudbrain/resource"
"encoding/json"
"errors"
"fmt"
@@ -16,6 +15,11 @@ import (
"time"
"unicode/utf8"

"code.gitea.io/gitea/modules/dataset"

"code.gitea.io/gitea/services/cloudbrain/resource"
"code.gitea.io/gitea/services/reward/point/account"

"code.gitea.io/gitea/modules/notification"

"code.gitea.io/gitea/modules/grampus"
@@ -31,6 +35,8 @@ import (
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/modelarts"
"code.gitea.io/gitea/modules/redis/redis_key"
"code.gitea.io/gitea/modules/redis/redis_lock"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/storage"
"code.gitea.io/gitea/modules/util"
@@ -137,6 +143,29 @@ func cloudBrainNewDataPrepare(ctx *context.Context) error {

ctx.Data["benchmarkMode"] = ctx.Query("benchmarkMode")

if ctx.Cloudbrain != nil {
ctx.Data["branch_name"] = ctx.Cloudbrain.BranchName
ctx.Data["image"] = ctx.Cloudbrain.Image
ctx.Data["image_id"] = ctx.Cloudbrain.ImageID
ctx.Data["boot_file"] = ctx.Cloudbrain.BootFile
ctx.Data["description"] = ctx.Cloudbrain.Description
spec, _ := resource.GetCloudbrainSpec(ctx.Cloudbrain.ID)
if spec != nil {
ctx.Data["spec_id"] = spec.ID
}
ctx.Data["run_para_list"] = ctx.Cloudbrain.Parameters
ctx.Data["model_name"] = ctx.Cloudbrain.ModelName
ctx.Data["label_name"] = ctx.Cloudbrain.LabelName
ctx.Data["ckpt_name"] = ctx.Cloudbrain.CkptName
ctx.Data["model_version"] = ctx.Cloudbrain.ModelVersion
ctx.Data["pre_train_model_url"] = ctx.Cloudbrain.PreTrainModelUrl
ctx.Data["compute_resource"] = ctx.Cloudbrain.ComputeResource
uuids, datasetNames := dataset.GetFilterDeletedAttachments(ctx.Cloudbrain.Uuid)
ctx.Data["attachment"] = uuids
ctx.Data["dataset_name"] = datasetNames
ctx.Data["cluster_type"] = models.OpenICluster
}

return nil
}

@@ -183,8 +212,12 @@ func CloudBrainNew(ctx *context.Context) {
ctx.Data["PageIsGPUDebug"] = true
ctx.HTML(200, tplCloudBrainNew)
}

// CloudBrainCreate handles the form submission that creates a brand-new
// cloudbrain task. It marks the request as a first-time creation
// (IsCreate=true) before delegating to the shared cloudBrainCreate
// implementation, which is also reused by the "new version" flow with
// IsCreate=false.
func CloudBrainCreate(ctx *context.Context, form auth.CreateCloudBrainForm) {
	ctx.Data["IsCreate"] = true
	cloudBrainCreate(ctx, form)
}

func cloudBrainCreate(ctx *context.Context, form auth.CreateCloudBrainForm) {
ctx.Data["PageIsCloudBrain"] = true
displayJobName := form.DisplayJobName
jobName := util.ConvertDisplayJobNameToJobName(displayJobName)
@@ -201,6 +234,16 @@ func CloudBrainCreate(ctx *context.Context, form auth.CreateCloudBrainForm) {
tpl = tplCloudBrainTrainJobNew
}

lock := redis_lock.NewDistributeLock(redis_key.CloudbrainBindingJobNameKey(fmt.Sprint(repo.ID), jobType, displayJobName))
isOk, err := lock.Lock(models.CloudbrainKeyDuration)
if !isOk {
log.Error("lock processed failed:%v", err, ctx.Data["MsgID"])
cloudBrainNewDataPrepare(ctx)
ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_samejob_err"), tpl, &form)
return
}
defer lock.UnLock()

tasks, err := models.GetCloudbrainsByDisplayJobName(repo.ID, jobType, displayJobName)
if err == nil {
if len(tasks) != 0 {
@@ -302,6 +345,13 @@ func CloudBrainCreate(ctx *context.Context, form auth.CreateCloudBrainForm) {
return
}

if !account.IsPointBalanceEnough(ctx.User.ID, spec.UnitPrice) {
log.Error("point balance is not enough,userId=%d specId=%d", ctx.User.ID, spec.ID)
cloudBrainNewDataPrepare(ctx)
ctx.RenderWithErr(ctx.Tr("points.insufficient_points_balance"), tpl, &form)
return
}

req := cloudbrain.GenerateCloudBrainTaskReq{
Ctx: ctx,
DisplayJobName: displayJobName,
@@ -328,13 +378,22 @@ func CloudBrainCreate(ctx *context.Context, form auth.CreateCloudBrainForm) {
Spec: spec,
}

if form.ModelName != "" { //使用预训练模型训练
req.ModelName = form.ModelName
req.LabelName = form.LabelName
req.CkptName = form.CkptName
req.ModelVersion = form.ModelVersion
req.PreTrainModelPath = setting.Attachment.Minio.RealPath + form.PreTrainModelUrl
req.PreTrainModelUrl = form.PreTrainModelUrl

}

err = cloudbrain.GenerateTask(req)
if err != nil {
cloudBrainNewDataPrepare(ctx)
ctx.RenderWithErr(err.Error(), tpl, &form)
return
}

if jobType == string(models.JobTypeTrain) {
ctx.Redirect(setting.AppSubURL + ctx.Repo.RepoLink + "/modelarts/train-job?listType=all")
} else {
@@ -342,6 +401,11 @@ func CloudBrainCreate(ctx *context.Context, form auth.CreateCloudBrainForm) {
}
}

// CloudBrainTrainJobVersionCreate handles the form submission that creates a
// new version of an existing cloudbrain train job. It flags the request as a
// re-creation (IsCreate=false) and delegates to the same shared
// cloudBrainCreate implementation used by the first-time create handler.
func CloudBrainTrainJobVersionCreate(ctx *context.Context, form auth.CreateCloudBrainForm) {
	ctx.Data["IsCreate"] = false
	cloudBrainCreate(ctx, form)
}

func loadCodeAndMakeModelPath(repo *models.Repository, codePath string, branchName string, jobName string, resultPath string) string {
err := downloadCode(repo, codePath, branchName)
if err != nil {
@@ -378,10 +442,20 @@ func CloudBrainInferenceJobCreate(ctx *context.Context, form auth.CreateCloudBra
bootFile := strings.TrimSpace(form.BootFile)
labelName := form.LabelName
repo := ctx.Repo.Repository
tpl := tplCloudBrainInferenceJobNew

lock := redis_lock.NewDistributeLock(redis_key.CloudbrainBindingJobNameKey(fmt.Sprint(repo.ID), jobType, displayJobName))
isOk, err := lock.Lock(models.CloudbrainKeyDuration)
if !isOk {
log.Error("lock processed failed:%v", err, ctx.Data["MsgID"])
cloudBrainNewDataPrepare(ctx)
ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_samejob_err"), tpl, &form)
return
}
defer lock.UnLock()

ckptUrl := setting.Attachment.Minio.RealPath + form.TrainUrl + form.CkptName
log.Info("ckpt url:" + ckptUrl)
tpl := tplCloudBrainInferenceJobNew
command, err := getInferenceJobCommand(form)
if err != nil {
log.Error("getTrainJobCommand failed: %v", err)
@@ -465,6 +539,12 @@ func CloudBrainInferenceJobCreate(ctx *context.Context, form auth.CreateCloudBra
ctx.RenderWithErr("Resource specification not available", tpl, &form)
return
}
if !account.IsPointBalanceEnough(ctx.User.ID, spec.UnitPrice) {
log.Error("point balance is not enough,userId=%d specId=%d", ctx.User.ID, spec.ID)
cloudBrainNewDataPrepare(ctx)
ctx.RenderWithErr(ctx.Tr("points.insufficient_points_balance"), tpl, &form)
return
}
req := cloudbrain.GenerateCloudBrainTaskReq{
Ctx: ctx,
DisplayJobName: displayJobName,
@@ -589,6 +669,13 @@ func CloudBrainRestart(ctx *context.Context) {
}
task.Spec = spec

if !account.IsPointBalanceEnough(ctx.User.ID, spec.UnitPrice) {
log.Error("point balance is not enough,userId=%d specId=%d", ctx.User.ID, spec.ID)
resultCode = "-1"
errorMsg = ctx.Tr("points.insufficient_points_balance")
break
}

count, err := models.GetCloudbrainCountByUserID(ctx.User.ID, string(models.JobTypeDebug))
if err != nil {
log.Error("GetCloudbrainCountByUserID failed:%v", err, ctx.Data["MsgID"])
@@ -1005,7 +1092,7 @@ func CloudBrainAdminCommitImage(ctx *context.Context, form auth.CommitAdminImage
UID: ctx.User.ID,
Type: models.GetRecommondType(form.IsRecommend),
Place: form.Place,
})
}, ctx.User)
if err != nil {
log.Error("CommitImagefailed")
if models.IsErrImageTagExist(err) {
@@ -1052,7 +1139,7 @@ func CloudBrainCommitImage(ctx *context.Context, form auth.CommitImageCloudBrain
CloudBrainType: form.Type,
Topics: validTopics,
UID: ctx.User.ID,
})
}, ctx.User)
if err != nil {
log.Error("CommitImage(%s) failed:%v", ctx.Cloudbrain.JobName, err.Error(), ctx.Data["msgID"])
if models.IsErrImageTagExist(err) {
@@ -1066,7 +1153,6 @@ func CloudBrainCommitImage(ctx *context.Context, form auth.CommitImageCloudBrain

return
}

ctx.JSON(200, models.BaseOKMessage)
}

@@ -1103,6 +1189,7 @@ func CloudBrainStop(ctx *context.Context) {
log.Error("the job(%s) has been stopped", task.JobName, ctx.Data["msgID"])
resultCode = "-1"
errorMsg = "cloudbrain.Already_stopped"
resultCode = task.Status
break
}

@@ -1129,7 +1216,6 @@ func CloudBrainStop(ctx *context.Context) {
errorMsg = "cloudbrain.Stopped_success_update_status_fail"
break
}

status = task.Status
break
}
@@ -1184,7 +1270,7 @@ func StopJobs(cloudBrains []*models.Cloudbrain) {
})

logErrorAndUpdateJobStatus(err, taskInfo)
} else {
} else if taskInfo.Type == models.TypeCloudBrainTwo {
if taskInfo.JobType == string(models.JobTypeTrain) {
err := retry(3, time.Second*30, func() error {
_, err := modelarts.StopTrainJob(taskInfo.JobID, strconv.FormatInt(taskInfo.VersionID, 10))
@@ -1201,8 +1287,16 @@ func StopJobs(cloudBrains []*models.Cloudbrain) {
})
logErrorAndUpdateJobStatus(err, taskInfo)
}
}
} else if taskInfo.Type == models.TypeC2Net {
if taskInfo.JobType == string(models.JobTypeTrain) {
err := retry(3, time.Second*30, func() error {
_, err := grampus.StopJob(taskInfo.JobID)
return err
})
logErrorAndUpdateJobStatus(err, taskInfo)
}

}
}
}

@@ -1812,6 +1906,10 @@ func SyncCloudbrainStatus() {
oldStatus := task.Status
task.Status = grampus.TransTrainJobStatus(result.JobInfo.Status)
task.Duration = result.JobInfo.RunSec

if task.Duration < 0 {
task.Duration = 0
}
task.TrainJobDuration = models.ConvertDurationToStr(task.Duration)

if task.StartTime == 0 && result.JobInfo.StartedAt > 0 {
@@ -2206,12 +2304,21 @@ func BenchMarkAlgorithmCreate(ctx *context.Context, form auth.CreateCloudBrainFo
codePath := setting.JobPath + jobName + cloudbrain.CodeMountPath
benchmarkTypeID := form.BenchmarkTypeID
benchmarkChildTypeID := form.BenchmarkChildTypeID
repo := ctx.Repo.Repository

ctx.Data["description"] = form.Description
ctx.Data["benchmarkTypeID"] = benchmarkTypeID
ctx.Data["benchmark_child_types_id_hidden"] = benchmarkChildTypeID

repo := ctx.Repo.Repository
lock := redis_lock.NewDistributeLock(redis_key.CloudbrainBindingJobNameKey(fmt.Sprint(repo.ID), form.JobType, displayJobName))
isOk, err := lock.Lock(models.CloudbrainKeyDuration)
if !isOk {
log.Error("lock processed failed:%v", err, ctx.Data["MsgID"])
cloudBrainNewDataPrepare(ctx)
ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_samejob_err"), tplCloudBrainBenchmarkNew, &form)
return
}
defer lock.UnLock()

tasks, err := models.GetCloudbrainsByDisplayJobName(repo.ID, string(models.JobTypeBenchmark), displayJobName)
if err == nil {
@@ -2254,6 +2361,12 @@ func BenchMarkAlgorithmCreate(ctx *context.Context, form auth.CreateCloudBrainFo
ctx.RenderWithErr("Resource specification not available", tplCloudBrainBenchmarkNew, &form)
return
}
if !account.IsPointBalanceEnough(ctx.User.ID, spec.UnitPrice) {
log.Error("point balance is not enough,userId=%d specId=%d", ctx.User.ID, spec.ID)
cloudBrainNewDataPrepare(ctx)
ctx.RenderWithErr(ctx.Tr("points.insufficient_points_balance"), tplCloudBrainBenchmarkNew, &form)
return
}

count, err := models.GetBenchmarkCountByUserID(ctx.User.ID)
if err != nil {
@@ -2387,6 +2500,16 @@ func ModelBenchmarkCreate(ctx *context.Context, form auth.CreateCloudBrainForm)
tpl := tplCloudBrainBenchmarkNew
command := cloudbrain.GetCloudbrainDebugCommand()

lock := redis_lock.NewDistributeLock(redis_key.CloudbrainBindingJobNameKey(fmt.Sprint(repo.ID), jobType, displayJobName))
isOk, err := lock.Lock(models.CloudbrainKeyDuration)
if !isOk {
log.Error("lock processed failed:%v", err, ctx.Data["MsgID"])
cloudBrainNewDataPrepare(ctx)
ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_samejob_err"), tpl, &form)
return
}
defer lock.UnLock()

tasks, err := models.GetCloudbrainsByDisplayJobName(repo.ID, jobType, displayJobName)
if err == nil {
if len(tasks) != 0 {
@@ -2472,6 +2595,13 @@ func ModelBenchmarkCreate(ctx *context.Context, form auth.CreateCloudBrainForm)
ctx.RenderWithErr("Resource specification not available", tpl, &form)
return
}

if !account.IsPointBalanceEnough(ctx.User.ID, spec.UnitPrice) {
log.Error("point balance is not enough,userId=%d specId=%d", ctx.User.ID, spec.ID)
cloudBrainNewDataPrepare(ctx)
ctx.RenderWithErr(ctx.Tr("points.insufficient_points_balance"), tpl, &form)
return
}
log.Info("Command=" + command)
log.Info("ModelPath=" + storage.GetMinioPath(jobName, cloudbrain.ModelMountPath+"/"))
req := cloudbrain.GenerateCloudBrainTaskReq{
@@ -2540,6 +2670,15 @@ func BenchmarkDel(ctx *context.Context) {
}

// CloudBrainTrainJobNew renders the page for creating a brand-new cloudbrain
// train job (IsCreate=true), delegating to the shared page-preparation code.
func CloudBrainTrainJobNew(ctx *context.Context) {
	ctx.Data["IsCreate"] = true
	cloudBrainTrainJobCreate(ctx)
}

// CloudBrainTrainJobVersionNew renders the same form, but for creating a new
// version of an existing train job (IsCreate=false).
func CloudBrainTrainJobVersionNew(ctx *context.Context) {
	ctx.Data["IsCreate"] = false
	cloudBrainTrainJobCreate(ctx)
}

func cloudBrainTrainJobCreate(ctx *context.Context) {
err := cloudBrainNewDataPrepare(ctx)
if err != nil {
ctx.ServerError("get new train-job info failed", err)
@@ -2629,6 +2768,9 @@ func getTrainJobCommand(form auth.CreateCloudBrainForm) (string, error) {
param += " --" + parameter.Label + "=" + parameter.Value
}
}
if form.CkptName != "" {
param += " --ckpt_url" + "=" + "/pretrainmodel/" + form.CkptName
}

command += "python /code/" + bootFile + param + " > " + cloudbrain.ModelMountPath + "/" + form.DisplayJobName + "-" + cloudbrain.LogFile



+ 311
- 84
routers/repo/grampus.go View File

@@ -1,7 +1,6 @@
package repo

import (
"code.gitea.io/gitea/services/cloudbrain/resource"
"encoding/json"
"errors"
"fmt"
@@ -13,11 +12,19 @@ import (
"strings"
"time"

"code.gitea.io/gitea/modules/dataset"

"code.gitea.io/gitea/services/cloudbrain/resource"

"code.gitea.io/gitea/services/reward/point/account"

"code.gitea.io/gitea/modules/auth"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/grampus"
"code.gitea.io/gitea/modules/modelarts"
"code.gitea.io/gitea/modules/notification"
"code.gitea.io/gitea/modules/redis/redis_key"
"code.gitea.io/gitea/modules/redis/redis_lock"
"code.gitea.io/gitea/modules/timeutil"
"code.gitea.io/gitea/modules/util"
"github.com/unknwon/com"
@@ -41,7 +48,7 @@ const (
)

func GrampusTrainJobGPUNew(ctx *context.Context) {
ctx.Data["datasetType"] = models.TypeCloudBrainOne
ctx.Data["IsCreate"] = true
err := grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
if err != nil {
ctx.ServerError("get new train-job info failed", err)
@@ -52,7 +59,7 @@ func GrampusTrainJobGPUNew(ctx *context.Context) {
}

func GrampusTrainJobNPUNew(ctx *context.Context) {
ctx.Data["datasetType"] = models.TypeCloudBrainTwo
ctx.Data["IsCreate"] = true
err := grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU)
if err != nil {
ctx.ServerError("get new train-job info failed", err)
@@ -134,9 +141,57 @@ func grampusTrainJobNewDataPrepare(ctx *context.Context, processType string) err
ctx.Data["WaitCount"] = waitCount
}

if ctx.Cloudbrain != nil {
uuids, datasetNames := dataset.GetFilterDeletedAttachments(ctx.Cloudbrain.Uuid)
ctx.Data["attachment"] = uuids
ctx.Data["boot_file"] = ctx.Cloudbrain.BootFile
ctx.Data["image_id"] = ctx.Cloudbrain.ImageID
ctx.Data["run_para_list"] = ctx.Cloudbrain.Parameters
ctx.Data["description"] = ctx.Cloudbrain.Description
ctx.Data["branch_name"] = ctx.Cloudbrain.BranchName
ctx.Data["engine_name"] = ctx.Cloudbrain.EngineName
ctx.Data["work_server_number"] = ctx.Cloudbrain.WorkServerNumber
if ctx.Cloudbrain.Image != "" {
ctx.Data["image"] = ctx.Cloudbrain.Image
} else {
ctx.Data["image"] = ctx.Cloudbrain.EngineName
}
ctx.Data["dataset_name"] = datasetNames
ctx.Data["model_name"] = ctx.Cloudbrain.ModelName

ctx.Data["model_version"] = ctx.Cloudbrain.ModelVersion
ctx.Data["ckpt_name"] = ctx.Cloudbrain.CkptName
ctx.Data["label_names"] = ctx.Cloudbrain.LabelName
ctx.Data["pre_train_model_url"] = ctx.Cloudbrain.PreTrainModelUrl
spec, _ := resource.GetCloudbrainSpec(ctx.Cloudbrain.ID)
if spec != nil {
ctx.Data["spec_id"] = spec.ID
}

}
return nil
}

// GrampusTrainJobVersionNew renders the "create new version" page for a
// grampus train job, choosing the GPU or NPU template according to the
// compute resource of the task being versioned (ctx.Cloudbrain).
// NOTE(review): a task whose ComputeResource is neither GPU nor NPU falls
// through without rendering anything — presumably unreachable for grampus
// tasks, but worth confirming against how ctx.Cloudbrain is populated.
func GrampusTrainJobVersionNew(ctx *context.Context) {
	task := ctx.Cloudbrain
	ctx.Data["IsCreate"] = false
	if task.ComputeResource == models.GPUResource {
		if err := grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU); err != nil {
			ctx.ServerError("get new train-job version info failed", err)
			return
		}
		ctx.HTML(http.StatusOK, tplGrampusTrainJobGPUNew)
	} else if task.ComputeResource == models.NPUResource {
		if err := grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU); err != nil {
			ctx.ServerError("get new train-job version info failed", err)
			return
		}
		// Use http.StatusOK here too, rather than the bare literal 200 the
		// GPU branch's sibling previously used, for consistency.
		ctx.HTML(http.StatusOK, tplGrampusTrainJobNPUNew)
	}
}

func prepareGrampusTrainSpecs(ctx *context.Context, computeResource string) {
noteBookSpecs, _ := resource.FindAvailableSpecs(ctx.User.ID, models.FindSpecsOptions{
JobType: models.JobTypeTrain,
@@ -201,6 +256,12 @@ func grampusParamCheckCreateTrainJob(form auth.CreateGrampusTrainJobForm) error
}

// GrampusTrainJobGpuCreate handles the form submission that creates a
// brand-new grampus GPU train job. It marks the request as a first-time
// creation (IsCreate=true) and delegates to the shared
// grampusTrainJobGpuCreate implementation.
func GrampusTrainJobGpuCreate(ctx *context.Context, form auth.CreateGrampusTrainJobForm) {
	ctx.Data["IsCreate"] = true
	grampusTrainJobGpuCreate(ctx, form)
}

func grampusTrainJobGpuCreate(ctx *context.Context, form auth.CreateGrampusTrainJobForm) {

displayJobName := form.DisplayJobName
jobName := util.ConvertDisplayJobNameToJobName(displayJobName)
uuid := form.Attachment
@@ -210,28 +271,31 @@ func GrampusTrainJobGpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
repo := ctx.Repo.Repository
codeLocalPath := setting.JobPath + jobName + cloudbrain.CodeMountPath + "/"
codeMinioPath := setting.CBCodePathPrefix + jobName + cloudbrain.CodeMountPath + "/"
dataMinioPath := setting.Attachment.Minio.BasePath + path.Join(uuid[0:1], uuid[1:2]) + "/" + uuid
branchName := form.BranchName
image := strings.TrimSpace(form.Image)
tpl := tplGrampusTrainJobGPUNew

if !jobNamePattern.MatchString(displayJobName) {
lock := redis_lock.NewDistributeLock(redis_key.CloudbrainBindingJobNameKey(fmt.Sprint(repo.ID), string(models.JobTypeTrain), displayJobName))
isOk, err := lock.Lock(models.CloudbrainKeyDuration)
if !isOk {
log.Error("lock processed failed:%v", err, ctx.Data["MsgID"])
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_jobname_err"), tplGrampusTrainJobGPUNew, &form)
ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_samejob_err"), tplGrampusTrainJobGPUNew, &form)
return
}
defer lock.UnLock()

bootFileExist, err := ctx.Repo.FileExists(bootFile, branchName)
if err != nil || !bootFileExist {
log.Error("Get bootfile error:", err, ctx.Data["MsgID"])
if !jobNamePattern.MatchString(displayJobName) {
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_bootfile_err"), tplGrampusTrainJobGPUNew, &form)
ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_jobname_err"), tpl, &form)
return
}

errStr := checkSpecialPool(ctx, "GPU")
if errStr != "" {
bootFileExist, err := ctx.Repo.FileExists(bootFile, branchName)
if err != nil || !bootFileExist {
log.Error("Get bootfile error:", err, ctx.Data["MsgID"])
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
ctx.RenderWithErr(errStr, tplGrampusTrainJobGPUNew, &form)
ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_bootfile_err"), tpl, &form)
return
}

@@ -240,13 +304,13 @@ func GrampusTrainJobGpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
if err != nil {
log.Error("GetGrampusCountByUserID failed:%v", err, ctx.Data["MsgID"])
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
ctx.RenderWithErr("system error", tplGrampusTrainJobGPUNew, &form)
ctx.RenderWithErr("system error", tpl, &form)
return
} else {
if count >= 1 {
log.Error("the user already has running or waiting task", ctx.Data["MsgID"])
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
ctx.RenderWithErr("you have already a running or waiting task, can not create more", tplGrampusTrainJobGPUNew, &form)
ctx.RenderWithErr("you have already a running or waiting task, can not create more", tpl, &form)
return
}
}
@@ -255,7 +319,7 @@ func GrampusTrainJobGpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
if err := grampusParamCheckCreateTrainJob(form); err != nil {
log.Error("paramCheckCreateTrainJob failed:(%v)", err, ctx.Data["MsgID"])
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
ctx.RenderWithErr(err.Error(), tplGrampusTrainJobGPUNew, &form)
ctx.RenderWithErr(err.Error(), tpl, &form)
return
}

@@ -265,14 +329,14 @@ func GrampusTrainJobGpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
if len(tasks) != 0 {
log.Error("the job name did already exist", ctx.Data["MsgID"])
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
ctx.RenderWithErr("the job name did already exist", tplGrampusTrainJobGPUNew, &form)
ctx.RenderWithErr("the job name did already exist", tpl, &form)
return
}
} else {
if !models.IsErrJobNotExist(err) {
log.Error("system error, %v", err, ctx.Data["MsgID"])
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
ctx.RenderWithErr("system error", tplGrampusTrainJobGPUNew, &form)
ctx.RenderWithErr("system error", tpl, &form)
return
}
}
@@ -285,16 +349,24 @@ func GrampusTrainJobGpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
})
if err != nil || spec == nil {
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
ctx.RenderWithErr("Resource specification not available", tplGrampusTrainJobGPUNew, &form)
ctx.RenderWithErr("Resource specification not available", tpl, &form)
return
}

if !account.IsPointBalanceEnough(ctx.User.ID, spec.UnitPrice) {
log.Error("point balance is not enough,userId=%d specId=%d", ctx.User.ID, spec.ID)
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
ctx.RenderWithErr(ctx.Tr("points.insufficient_points_balance"), tplGrampusTrainJobGPUNew, &form)
return
}

//check dataset
attachment, err := models.GetAttachmentByUUID(uuid)

datasetInfos, datasetNames, err := models.GetDatasetInfo(uuid, models.GPU)
if err != nil {
log.Error("GetAttachmentByUUID failed:", err.Error(), ctx.Data["MsgID"])
log.Error("GetDatasetInfo failed: %v", err, ctx.Data["MsgID"])
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
ctx.RenderWithErr("dataset is not exist", tplGrampusTrainJobGPUNew, &form)
ctx.RenderWithErr(ctx.Tr("cloudbrain.error.dataset_select"), tpl, &form)
return
}

@@ -307,7 +379,7 @@ func GrampusTrainJobGpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
if err := downloadZipCode(ctx, codeLocalPath, branchName); err != nil {
log.Error("downloadZipCode failed, server timed out: %s (%v)", repo.FullName(), err, ctx.Data["MsgID"])
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
ctx.RenderWithErr(ctx.Tr("cloudbrain.load_code_failed"), tplGrampusTrainJobGPUNew, &form)
ctx.RenderWithErr(ctx.Tr("cloudbrain.load_code_failed"), tpl, &form)
return
}

@@ -316,7 +388,7 @@ func GrampusTrainJobGpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
if err := uploadCodeToMinio(codeLocalPath+"/", jobName, cloudbrain.CodeMountPath+"/"); err != nil {
log.Error("Failed to uploadCodeToMinio: %s (%v)", repo.FullName(), err, ctx.Data["MsgID"])
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
ctx.RenderWithErr(ctx.Tr("cloudbrain.load_code_failed"), tplGrampusTrainJobGPUNew, &form)
ctx.RenderWithErr(ctx.Tr("cloudbrain.load_code_failed"), tpl, &form)
return
}

@@ -324,7 +396,7 @@ func GrampusTrainJobGpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
if err := mkModelPath(modelPath); err != nil {
log.Error("Failed to mkModelPath: %s (%v)", repo.FullName(), err, ctx.Data["MsgID"])
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
ctx.RenderWithErr(ctx.Tr("cloudbrain.load_code_failed"), tplGrampusTrainJobGPUNew, &form)
ctx.RenderWithErr(ctx.Tr("cloudbrain.load_code_failed"), tpl, &form)
return
}

@@ -332,52 +404,102 @@ func GrampusTrainJobGpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
if err := uploadCodeToMinio(modelPath, jobName, cloudbrain.ModelMountPath+"/"); err != nil {
log.Error("Failed to uploadCodeToMinio: %s (%v)", repo.FullName(), err, ctx.Data["MsgID"])
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
ctx.RenderWithErr(ctx.Tr("cloudbrain.load_code_failed"), tplGrampusTrainJobGPUNew, &form)
ctx.RenderWithErr(ctx.Tr("cloudbrain.load_code_failed"), tpl, &form)
return
}

var datasetRemotePath, allFileName string
for _, datasetInfo := range datasetInfos {
if datasetRemotePath == "" {
datasetRemotePath = datasetInfo.DataLocalPath
allFileName = datasetInfo.FullName
} else {
datasetRemotePath = datasetRemotePath + ";" + datasetInfo.DataLocalPath
allFileName = allFileName + ";" + datasetInfo.FullName
}

}

//prepare command
command, err := generateCommand(repo.Name, grampus.ProcessorTypeGPU, codeMinioPath+cloudbrain.DefaultBranchName+".zip", dataMinioPath, bootFile, params, setting.CBCodePathPrefix+jobName+cloudbrain.ModelMountPath+"/", attachment.Name)
preTrainModelPath := getPreTrainModelPath(form.PreTrainModelUrl, form.CkptName)

command, err := generateCommand(repo.Name, grampus.ProcessorTypeGPU, codeMinioPath+cloudbrain.DefaultBranchName+".zip", datasetRemotePath, bootFile, params, setting.CBCodePathPrefix+jobName+cloudbrain.ModelMountPath+"/", allFileName, preTrainModelPath, form.CkptName)
if err != nil {
log.Error("Failed to generateCommand: %s (%v)", displayJobName, err, ctx.Data["MsgID"])
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
ctx.RenderWithErr("Create task failed, internal error", tplGrampusTrainJobGPUNew, &form)
ctx.RenderWithErr("Create task failed, internal error", tpl, &form)
return
}

commitID, _ := ctx.Repo.GitRepo.GetBranchCommitID(branchName)

req := &grampus.GenerateTrainJobReq{
JobName: jobName,
DisplayJobName: displayJobName,
ComputeResource: models.GPUResource,
ProcessType: grampus.ProcessorTypeGPU,
Command: command,
ImageUrl: image,
Description: description,
BootFile: bootFile,
Uuid: uuid,
CommitID: commitID,
BranchName: branchName,
Params: form.Params,
EngineName: image,
DatasetName: attachment.Name,
JobName: jobName,
DisplayJobName: displayJobName,
ComputeResource: models.GPUResource,
ProcessType: grampus.ProcessorTypeGPU,
Command: command,
ImageUrl: image,
Description: description,
BootFile: bootFile,
Uuid: uuid,
CommitID: commitID,
BranchName: branchName,
Params: form.Params,
EngineName: image,
DatasetNames: datasetNames,
DatasetInfos: datasetInfos,

IsLatestVersion: modelarts.IsLatestVersion,
VersionCount: modelarts.VersionCountOne,
WorkServerNumber: 1,
Spec: spec,
}

if form.ModelName != "" { //使用预训练模型训练
req.ModelName = form.ModelName
req.LabelName = form.LabelName
req.CkptName = form.CkptName
req.ModelVersion = form.ModelVersion
req.PreTrainModelUrl = form.PreTrainModelUrl

}

err = grampus.GenerateTrainJob(ctx, req)
if err != nil {
log.Error("GenerateTrainJob failed:%v", err.Error(), ctx.Data["MsgID"])
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
ctx.RenderWithErr(err.Error(), tplGrampusTrainJobGPUNew, &form)
ctx.RenderWithErr(err.Error(), tpl, &form)
return
}
ctx.Redirect(setting.AppSubURL + ctx.Repo.RepoLink + "/modelarts/train-job")
}

// getPreTrainModelPath strips the leading bucket segment (everything up to and
// including the first "/") from pretrainModelDir and appends fileName to the
// remainder, yielding the in-bucket object path of the pre-trained model file.
// When pretrainModelDir has no "/" after position zero, "" is returned.
func getPreTrainModelPath(pretrainModelDir string, fileName string) string {
	slash := strings.Index(pretrainModelDir, "/")
	if slash <= 0 {
		// No non-empty bucket prefix to strip: nothing usable.
		return ""
	}
	return pretrainModelDir[slash+1:] + fileName
}

// GrampusTrainJobVersionCreate dispatches creation of a new version of an
// existing Grampus training job to the GPU or NPU handler, selected by the
// "compute_resource" query parameter. Any other resource value is rejected
// with a server error.
func GrampusTrainJobVersionCreate(ctx *context.Context, form auth.CreateGrampusTrainJobForm) {
	// Mark this request as a version creation rather than a brand-new job.
	ctx.Data["IsCreate"] = false
	switch ctx.Query("compute_resource") {
	case models.GPUResource:
		grampusTrainJobGpuCreate(ctx, form)
	case models.NPUResource:
		grampusTrainJobNpuCreate(ctx, form)
	default:
		ctx.ServerError("resource error", errors.New("compute resource is not support"))
	}
}

func checkSpecialPool(ctx *context.Context, resourceType string) string {
grampus.InitSpecialPool()
if grampus.SpecialPools != nil {
@@ -401,6 +523,12 @@ func checkSpecialPool(ctx *context.Context, resourceType string) string {
}

// GrampusTrainJobNpuCreate is the HTTP handler for creating a brand-new
// Grampus NPU training job. It flags the request as a create operation
// (IsCreate=true, as opposed to the version-create path which sets false)
// and delegates the actual work to grampusTrainJobNpuCreate.
func GrampusTrainJobNpuCreate(ctx *context.Context, form auth.CreateGrampusTrainJobForm) {
	ctx.Data["IsCreate"] = true
	grampusTrainJobNpuCreate(ctx, form)
}

func grampusTrainJobNpuCreate(ctx *context.Context, form auth.CreateGrampusTrainJobForm) {

displayJobName := form.DisplayJobName
jobName := util.ConvertDisplayJobNameToJobName(displayJobName)
uuid := form.Attachment
@@ -410,30 +538,34 @@ func GrampusTrainJobNpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
repo := ctx.Repo.Repository
codeLocalPath := setting.JobPath + jobName + modelarts.CodePath
codeObsPath := grampus.JobPath + jobName + modelarts.CodePath
dataObsPath := setting.BasePath + path.Join(uuid[0:1], uuid[1:2]) + "/" + uuid + "/"
//dataObsPath := setting.BasePath + path.Join(uuid[0:1], uuid[1:2]) + "/" + uuid + "/"
branchName := form.BranchName
isLatestVersion := modelarts.IsLatestVersion
versionCount := modelarts.VersionCountOne
engineName := form.EngineName
tpl := tplGrampusTrainJobNPUNew

if !jobNamePattern.MatchString(displayJobName) {
lock := redis_lock.NewDistributeLock(redis_key.CloudbrainBindingJobNameKey(fmt.Sprint(repo.ID), string(models.JobTypeTrain), displayJobName))
isOk, err := lock.Lock(models.CloudbrainKeyDuration)
if !isOk {
log.Error("lock processed failed:%v", err, ctx.Data["MsgID"])
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU)
ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_jobname_err"), tplGrampusTrainJobNPUNew, &form)
ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_samejob_err"), tplGrampusTrainJobNPUNew, &form)
return
}
defer lock.UnLock()

bootFileExist, err := ctx.Repo.FileExists(bootFile, branchName)
if err != nil || !bootFileExist {
log.Error("Get bootfile error:", err, ctx.Data["MsgID"])
if !jobNamePattern.MatchString(displayJobName) {
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU)
ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_bootfile_err"), tplGrampusTrainJobNPUNew, &form)
ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_jobname_err"), tpl, &form)
return
}

errStr := checkSpecialPool(ctx, "NPU")
if errStr != "" {
bootFileExist, err := ctx.Repo.FileExists(bootFile, branchName)
if err != nil || !bootFileExist {
log.Error("Get bootfile error:", err, ctx.Data["MsgID"])
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU)
ctx.RenderWithErr(errStr, tplGrampusTrainJobGPUNew, &form)
ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_bootfile_err"), tpl, &form)
return
}

@@ -442,13 +574,13 @@ func GrampusTrainJobNpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
if err != nil {
log.Error("GetGrampusCountByUserID failed:%v", err, ctx.Data["MsgID"])
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU)
ctx.RenderWithErr("system error", tplGrampusTrainJobNPUNew, &form)
ctx.RenderWithErr("system error", tpl, &form)
return
} else {
if count >= 1 {
log.Error("the user already has running or waiting task", ctx.Data["MsgID"])
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU)
ctx.RenderWithErr("you have already a running or waiting task, can not create more", tplGrampusTrainJobNPUNew, &form)
ctx.RenderWithErr("you have already a running or waiting task, can not create more", tpl, &form)
return
}
}
@@ -457,7 +589,7 @@ func GrampusTrainJobNpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
if err := grampusParamCheckCreateTrainJob(form); err != nil {
log.Error("paramCheckCreateTrainJob failed:(%v)", err)
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU)
ctx.RenderWithErr(err.Error(), tplGrampusTrainJobNPUNew, &form)
ctx.RenderWithErr(err.Error(), tpl, &form)
return
}

@@ -467,14 +599,14 @@ func GrampusTrainJobNpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
if len(tasks) != 0 {
log.Error("the job name did already exist", ctx.Data["MsgID"])
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU)
ctx.RenderWithErr("the job name did already exist", tplGrampusTrainJobNPUNew, &form)
ctx.RenderWithErr("the job name did already exist", tpl, &form)
return
}
} else {
if !models.IsErrJobNotExist(err) {
log.Error("system error, %v", err, ctx.Data["MsgID"])
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU)
ctx.RenderWithErr("system error", tplGrampusTrainJobNPUNew, &form)
ctx.RenderWithErr("system error", tpl, &form)
return
}
}
@@ -487,16 +619,22 @@ func GrampusTrainJobNpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
})
if err != nil || spec == nil {
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU)
ctx.RenderWithErr("Resource specification not available", tplGrampusTrainJobNPUNew, &form)
ctx.RenderWithErr("Resource specification not available", tpl, &form)
return
}
if !account.IsPointBalanceEnough(ctx.User.ID, spec.UnitPrice) {
log.Error("point balance is not enough,userId=%d specId=%d", ctx.User.ID, spec.ID)
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU)
ctx.RenderWithErr(ctx.Tr("points.insufficient_points_balance"), tplGrampusTrainJobNPUNew, &form)
return
}

//check dataset
attachment, err := models.GetAttachmentByUUID(uuid)
datasetInfos, datasetNames, err := models.GetDatasetInfo(uuid, models.NPU)
if err != nil {
log.Error("GetAttachmentByUUID failed:", err.Error(), ctx.Data["MsgID"])
log.Error("GetDatasetInfo failed: %v", err, ctx.Data["MsgID"])
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU)
ctx.RenderWithErr("dataset is not exist", tplGrampusTrainJobNPUNew, &form)
ctx.RenderWithErr(ctx.Tr("cloudbrain.error.dataset_select"), tpl, &form)
return
}

@@ -509,7 +647,7 @@ func GrampusTrainJobNpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
if err := downloadZipCode(ctx, codeLocalPath, branchName); err != nil {
log.Error("downloadZipCode failed, server timed out: %s (%v)", repo.FullName(), err)
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU)
ctx.RenderWithErr(ctx.Tr("cloudbrain.load_code_failed"), tplGrampusTrainJobNPUNew, &form)
ctx.RenderWithErr(ctx.Tr("cloudbrain.load_code_failed"), tpl, &form)
return
}

@@ -517,23 +655,36 @@ func GrampusTrainJobNpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
if err := obsMkdir(setting.CodePathPrefix + jobName + modelarts.OutputPath); err != nil {
log.Error("Failed to obsMkdir_output: %s (%v)", repo.FullName(), err)
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU)
ctx.RenderWithErr(ctx.Tr("cloudbrain.load_code_failed"), tplGrampusTrainJobNPUNew, &form)
ctx.RenderWithErr(ctx.Tr("cloudbrain.load_code_failed"), tpl, &form)
return
}

if err := uploadCodeToObs(codeLocalPath, jobName, ""); err != nil {
log.Error("Failed to uploadCodeToObs: %s (%v)", repo.FullName(), err)
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU)
ctx.RenderWithErr(ctx.Tr("cloudbrain.load_code_failed"), tplGrampusTrainJobNPUNew, &form)
ctx.RenderWithErr(ctx.Tr("cloudbrain.load_code_failed"), tpl, &form)
return
}

var datasetRemotePath, allFileName string
for _, datasetInfo := range datasetInfos {
if datasetRemotePath == "" {
datasetRemotePath = datasetInfo.DataLocalPath + "'" + datasetInfo.FullName + "'"
allFileName = datasetInfo.FullName
} else {
datasetRemotePath = datasetRemotePath + ";" + datasetInfo.DataLocalPath + "'" + datasetInfo.FullName + "'"
allFileName = allFileName + ";" + datasetInfo.FullName
}

}

//prepare command
command, err := generateCommand(repo.Name, grampus.ProcessorTypeNPU, codeObsPath+cloudbrain.DefaultBranchName+".zip", dataObsPath+"'"+attachment.Name+"'", bootFile, params, setting.CodePathPrefix+jobName+modelarts.OutputPath, attachment.Name)
preTrainModelPath := getPreTrainModelPath(form.PreTrainModelUrl, form.CkptName)
command, err := generateCommand(repo.Name, grampus.ProcessorTypeNPU, codeObsPath+cloudbrain.DefaultBranchName+".zip", datasetRemotePath, bootFile, params, setting.CodePathPrefix+jobName+modelarts.OutputPath, allFileName, preTrainModelPath, form.CkptName)
if err != nil {
log.Error("Failed to generateCommand: %s (%v)", displayJobName, err, ctx.Data["MsgID"])
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU)
ctx.RenderWithErr("Create task failed, internal error", tplGrampusTrainJobNPUNew, &form)
ctx.RenderWithErr("Create task failed, internal error", tpl, &form)
return
}

@@ -546,7 +697,6 @@ func GrampusTrainJobNpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
ProcessType: grampus.ProcessorTypeNPU,
Command: command,
ImageId: form.ImageID,
DataUrl: dataObsPath,
Description: description,
CodeObsPath: codeObsPath,
BootFileUrl: codeObsPath + bootFile,
@@ -560,15 +710,24 @@ func GrampusTrainJobNpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
EngineName: engineName,
VersionCount: versionCount,
TotalVersionCount: modelarts.TotalVersionCount,
DatasetName: attachment.Name,
DatasetNames: datasetNames,
DatasetInfos: datasetInfos,
Spec: spec,
}
if form.ModelName != "" { //使用预训练模型训练
req.ModelName = form.ModelName
req.LabelName = form.LabelName
req.CkptName = form.CkptName
req.ModelVersion = form.ModelVersion
req.PreTrainModelUrl = form.PreTrainModelUrl
req.PreTrainModelPath = preTrainModelPath
}

err = grampus.GenerateTrainJob(ctx, req)
if err != nil {
log.Error("GenerateTrainJob failed:%v", err.Error())
grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU)
ctx.RenderWithErr(err.Error(), tplGrampusTrainJobNPUNew, &form)
ctx.RenderWithErr(err.Error(), tpl, &form)
return
}
ctx.Redirect(setting.AppSubURL + ctx.Repo.RepoLink + "/modelarts/train-job")
@@ -695,6 +854,9 @@ func GrampusTrainJobShow(ctx *context.Context) {
task.Status = grampus.TransTrainJobStatus(result.JobInfo.Status)
if task.Status != result.JobInfo.Status || result.JobInfo.Status == models.GrampusStatusRunning {
task.Duration = result.JobInfo.RunSec
if task.Duration < 0 {
task.Duration = 0
}
task.TrainJobDuration = models.ConvertDurationToStr(task.Duration)

if task.StartTime == 0 && result.JobInfo.StartedAt > 0 {
@@ -752,7 +914,7 @@ func GrampusTrainJobShow(ctx *context.Context) {
ctx.HTML(http.StatusOK, tplGrampusTrainJobShow)
}

func GrampusGetLog(ctx *context.Context) {
func GrampusDownloadLog(ctx *context.Context) {
jobID := ctx.Params(":jobid")
job, err := models.GetCloudbrainByJobID(jobID)
if err != nil {
@@ -764,19 +926,46 @@ func GrampusGetLog(ctx *context.Context) {
content, err := grampus.GetTrainJobLog(job.JobID)
if err != nil {
log.Error("GetTrainJobLog failed: %v", err, ctx.Data["MsgID"])
content = ""
}
fileName := job.JobName + "-log.txt"
ctx.Resp.Header().Set("Content-Disposition", "attachment; filename="+fileName)
ctx.Resp.Header().Set("Content-Type", "application/octet-stream")
var b []byte = []byte(content)
ctx.Resp.Write(b)
}

func GrampusGetLog(ctx *context.Context) {
jobID := ctx.Params(":jobid")
job, err := models.GetCloudbrainByJobID(jobID)
if err != nil {
log.Error("GetCloudbrainByJobID failed: %v", err, ctx.Data["MsgID"])
ctx.ServerError(err.Error(), err)
return
}

content, err := grampus.GetTrainJobLog(job.JobID)
if err != nil {
log.Error("GetTrainJobLog failed: %v", err, ctx.Data["MsgID"])
ctx.ServerError(err.Error(), err)
return
}
var canLogDownload bool
if err != nil {
canLogDownload = false
} else {
canLogDownload = true
}
ctx.JSON(http.StatusOK, map[string]interface{}{
"JobName": job.JobName,
"Content": content,
"JobName": job.JobName,
"Content": content,
"CanLogDownload": canLogDownload,
})

return
}

func generateCommand(repoName, processorType, codeRemotePath, dataRemotePath, bootFile, paramSrc, outputRemotePath, datasetName string) (string, error) {
func generateCommand(repoName, processorType, codeRemotePath, dataRemotePath, bootFile, paramSrc, outputRemotePath, datasetName, pretrainModelPath, pretrainModelFileName string) (string, error) {
var command string

workDir := grampus.NpuWorkDir
@@ -784,23 +973,26 @@ func generateCommand(repoName, processorType, codeRemotePath, dataRemotePath, bo
workDir = grampus.GpuWorkDir
}

command += "pwd;cd " + workDir + grampus.CommandPrepareScript
command += "pwd;cd " + workDir + fmt.Sprintf(grampus.CommandPrepareScript, setting.Grampus.SyncScriptProject, setting.Grampus.SyncScriptProject)
//download code & dataset
if processorType == grampus.ProcessorTypeNPU {
commandDownload := "./downloader_for_obs " + setting.Bucket + " " + codeRemotePath + " " + grampus.CodeArchiveName + " " + dataRemotePath + " '" + datasetName + "';"
commandDownload := "./downloader_for_obs " + setting.Bucket + " " + codeRemotePath + " " + grampus.CodeArchiveName + ";"
command += commandDownload
} else if processorType == grampus.ProcessorTypeGPU {
commandDownload := "./downloader_for_minio " + setting.Grampus.Env + " " + codeRemotePath + " " + grampus.CodeArchiveName + " " + dataRemotePath + " '" + datasetName + "';"
commandDownload := "./downloader_for_minio " + setting.Grampus.Env + " " + codeRemotePath + " " + grampus.CodeArchiveName + " '" + dataRemotePath + "' '" + datasetName + "'"
commandDownload = processPretrainModelParameter(pretrainModelPath, pretrainModelFileName, commandDownload)
command += commandDownload
}

//unzip code & dataset
toolUnzip := "unzip -q '"
if strings.HasSuffix(datasetName, ".tar.gz") {
toolUnzip = "tar -zxvf '"
if processorType == grampus.ProcessorTypeNPU {
commandUnzip := "cd " + workDir + "code;unzip -q master.zip;"
command += commandUnzip
} else if processorType == grampus.ProcessorTypeGPU {
unZipDatasetCommand := generateDatasetUnzipCommand(datasetName)
commandUnzip := "cd " + workDir + "code;unzip -q master.zip;echo \"start to unzip dataset\";cd " + workDir + "dataset;" + unZipDatasetCommand
command += commandUnzip
}
commandUnzip := "cd " + workDir + "code;unzip -q master.zip;echo \"start to unzip dataset\";cd " + workDir + "dataset;" + toolUnzip + datasetName + "';"
command += commandUnzip

command += "echo \"unzip finished;start to exec code;\";"

@@ -834,6 +1026,9 @@ func generateCommand(repoName, processorType, codeRemotePath, dataRemotePath, bo
if processorType == grampus.ProcessorTypeNPU {
commandCode = "/bin/bash /home/work/run_train_for_openi.sh " + workDir + "code/" + strings.ToLower(repoName) + "/" + bootFile + " /tmp/log/train.log" + paramCode + ";"
} else if processorType == grampus.ProcessorTypeGPU {
if pretrainModelFileName != "" {
paramCode += " --ckpt_url" + "=" + workDir + "pretrainmodel/" + pretrainModelFileName
}
commandCode = "cd " + workDir + "code/" + strings.ToLower(repoName) + ";python " + bootFile + paramCode + ";"
}

@@ -845,10 +1040,10 @@ func generateCommand(repoName, processorType, codeRemotePath, dataRemotePath, bo

//upload models
if processorType == grampus.ProcessorTypeNPU {
commandUpload := "cd " + workDir + "script_for_grampus/;./uploader_for_npu " + setting.Bucket + " " + outputRemotePath + " " + workDir + "output/;"
commandUpload := "cd " + workDir + setting.Grampus.SyncScriptProject + "/;./uploader_for_npu " + setting.Bucket + " " + outputRemotePath + " " + workDir + "output/;"
command += commandUpload
} else if processorType == grampus.ProcessorTypeGPU {
commandUpload := "cd " + workDir + "script_for_grampus/;./uploader_for_gpu " + setting.Grampus.Env + " " + outputRemotePath + " " + workDir + "output/;"
commandUpload := "cd " + workDir + setting.Grampus.SyncScriptProject + "/;./uploader_for_gpu " + setting.Grampus.Env + " " + outputRemotePath + " " + workDir + "output/;"
command += commandUpload
}

@@ -859,6 +1054,38 @@ func generateCommand(repoName, processorType, codeRemotePath, dataRemotePath, bo
return command, nil
}

// processPretrainModelParameter appends the optional pre-trained-model
// download arguments (path and file name, each single-quoted) to the given
// download command and terminates it with ";". When pretrainModelPath is
// empty, the command is only terminated.
func processPretrainModelParameter(pretrainModelPath string, pretrainModelFileName string, commandDownload string) string {
	var b strings.Builder
	b.WriteString(commandDownload)
	if pretrainModelPath != "" {
		b.WriteString(" '" + pretrainModelPath + "' '" + pretrainModelFileName + "'")
	}
	b.WriteString(";")
	return b.String()
}

// generateDatasetUnzipCommand builds the shell fragment that unpacks the
// selected dataset archive(s). datasetName holds one archive name, or several
// names separated by ";". A single archive is extracted into the current
// directory (a .tar.gz drops its top-level directory via --strip-components);
// with multiple archives, each zip is extracted into a directory named after
// the archive so datasets do not collide.
func generateDatasetUnzipCommand(datasetName string) string {
	names := strings.Split(datasetName, ";")

	if len(names) == 1 { // single dataset
		if strings.HasSuffix(names[0], ".tar.gz") {
			return "tar --strip-components=1 -zxvf '" + datasetName + "';"
		}
		return "unzip -q '" + datasetName + "';"
	}

	// multiple datasets: unpack each archive in turn
	var b strings.Builder
	for _, name := range names {
		if strings.HasSuffix(name, ".tar.gz") {
			b.WriteString("tar -zxvf '" + name + "';")
		} else {
			b.WriteString("unzip -q '" + name + "' -d './" + strings.TrimSuffix(name, ".zip") + "';")
		}
	}
	return b.String()
}

func downloadZipCode(ctx *context.Context, codePath, branchName string) error {
archiveType := git.ZIP
archivePath := codePath


+ 158
- 254
routers/repo/modelarts.go View File

@@ -2,8 +2,6 @@ package repo

import (
"archive/zip"
"code.gitea.io/gitea/modules/modelarts_cd"
"code.gitea.io/gitea/services/cloudbrain/resource"
"encoding/json"
"errors"
"fmt"
@@ -17,6 +15,12 @@ import (
"time"
"unicode/utf8"

"code.gitea.io/gitea/modules/dataset"

"code.gitea.io/gitea/modules/modelarts_cd"
"code.gitea.io/gitea/services/cloudbrain/resource"
"code.gitea.io/gitea/services/reward/point/account"

"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/auth"
"code.gitea.io/gitea/modules/base"
@@ -27,6 +31,8 @@ import (
"code.gitea.io/gitea/modules/modelarts"
"code.gitea.io/gitea/modules/notification"
"code.gitea.io/gitea/modules/obs"
"code.gitea.io/gitea/modules/redis/redis_key"
"code.gitea.io/gitea/modules/redis/redis_lock"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/storage"
"code.gitea.io/gitea/modules/timeutil"
@@ -209,6 +215,16 @@ func Notebook2Create(ctx *context.Context, form auth.CreateModelArtsNotebookForm
imageId := form.ImageId
repo := ctx.Repo.Repository

lock := redis_lock.NewDistributeLock(redis_key.CloudbrainBindingJobNameKey(fmt.Sprint(repo.ID), string(models.JobTypeDebug), displayJobName))
isOk, err := lock.Lock(models.CloudbrainKeyDuration)
if !isOk {
log.Error("lock processed failed:%v", err, ctx.Data["MsgID"])
notebookNewDataPrepare(ctx)
ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_samejob_err"), tplModelArtsNotebookNew, &form)
return
}
defer lock.UnLock()

count, err := models.GetCloudbrainNotebookCountByUserID(ctx.User.ID)
if err != nil {
log.Error("GetCloudbrainNotebookCountByUserID failed:%v", err, ctx.Data["MsgID"])
@@ -254,6 +270,13 @@ func Notebook2Create(ctx *context.Context, form auth.CreateModelArtsNotebookForm
ctx.RenderWithErr("Resource specification not available", tplModelArtsNotebookNew, &form)
return
}
if !account.IsPointBalanceEnough(ctx.User.ID, spec.UnitPrice) {
log.Error("point balance is not enough,userId=%d specId=%d ", ctx.User.ID, spec.ID)
cloudBrainNewDataPrepare(ctx)
ctx.RenderWithErr(ctx.Tr("points.insufficient_points_balance"), tplModelArtsNotebookNew, &form)
return
}

if setting.ModelartsCD.Enabled {
err = modelarts_cd.GenerateNotebook(ctx, displayJobName, jobName, uuid, description, imageId, spec)
} else {
@@ -461,7 +484,11 @@ func NotebookRestart(ctx *context.Context) {
errorMsg = "Resource specification not support any more"
break
}

if !account.IsPointBalanceEnough(ctx.User.ID, spec.UnitPrice) {
log.Error("point balance is not enough,userId=%d specId=%d", ctx.User.ID, spec.ID)
errorMsg = ctx.Tr("points.insufficient_points_balance")
break
}
createTime := timeutil.TimeStampNow()
param := models.NotebookAction{
Action: models.ActionStart,
@@ -834,84 +861,6 @@ func setSpecBySpecialPoolConfig(ctx *context.Context, jobType string) {
}
}

// trainJobErrorNewDataPrepare repopulates the ModelArts "new train job" page
// context from the submitted form so the page can be re-rendered after a
// validation or creation error without losing the user's input. It fills
// ctx.Data with attachments, resource pools, engines, engine versions, specs,
// parameter configs and the form's own fields, and returns a non-nil error
// only when loading that page data itself fails.
func trainJobErrorNewDataPrepare(ctx *context.Context, form auth.CreateModelArtsTrainJobForm) error {
	ctx.Data["PageIsCloudBrain"] = true

	//can, err := canUserCreateTrainJob(ctx.User.ID)
	//if err != nil {
	//	ctx.ServerError("canUserCreateTrainJob", err)
	//	return
	//}
	//
	//if !can {
	//	log.Error("the user can not create train-job")
	//	ctx.ServerError("the user can not create train-job", fmt.Errorf("the user can not create train-job"))
	//	return
	//}

	// Suggest a fresh display name: user-name prefix + hour timestamp + unix-time suffix.
	t := time.Now()
	var displayJobName = jobNamePrefixValid(cutString(ctx.User.Name, 5)) + t.Format("2006010215") + strconv.Itoa(int(t.Unix()))[5:]
	ctx.Data["display_job_name"] = displayJobName

	attachs, err := models.GetModelArtsTrainAttachments(ctx.User.ID)
	if err != nil {
		ctx.ServerError("GetAllUserAttachments failed:", err)
		return err
	}
	ctx.Data["attachments"] = attachs

	// Resource pools, engines and engine versions are stored as JSON strings
	// in settings; unmarshal each for the page's dropdowns.
	var resourcePools modelarts.ResourcePool
	if err = json.Unmarshal([]byte(setting.ResourcePools), &resourcePools); err != nil {
		ctx.ServerError("json.Unmarshal failed:", err)
		return err
	}
	ctx.Data["resource_pools"] = resourcePools.Info

	var engines modelarts.Engine
	if err = json.Unmarshal([]byte(setting.Engines), &engines); err != nil {
		ctx.ServerError("json.Unmarshal failed:", err)
		return err
	}
	ctx.Data["engines"] = engines.Info

	var versionInfos modelarts.VersionInfo
	if err = json.Unmarshal([]byte(setting.EngineVersions), &versionInfos); err != nil {
		ctx.ServerError("json.Unmarshal failed:", err)
		return err
	}
	ctx.Data["engine_versions"] = versionInfos.Version

	prepareCloudbrainTwoTrainSpecs(ctx)

	configList, err := getConfigList(modelarts.PerPage, 1, modelarts.SortByCreateTime, "desc", "", modelarts.ConfigTypeCustom)
	if err != nil {
		ctx.ServerError("getConfigList failed:", err)
		return err
	}
	// Echo the user's submitted run parameters back into the form.
	var Parameters modelarts.Parameters
	if err = json.Unmarshal([]byte(form.Params), &Parameters); err != nil {
		ctx.ServerError("json.Unmarshal failed:", err)
		return err
	}
	ctx.Data["params"] = Parameters.Parameter
	ctx.Data["config_list"] = configList.ParaConfigs
	ctx.Data["bootFile"] = form.BootFile
	ctx.Data["uuid"] = form.Attachment
	// Dataset lookup failure is tolerated: log it and render the page anyway
	// (note the deliberate "return nil").
	_, datasetNames, err := models.GetDatasetInfo(form.Attachment)
	if err != nil {
		log.Error("GetDatasetInfo failed: %v", err, ctx.Data["MsgID"])
		return nil
	}
	ctx.Data["dataset_name"] = datasetNames
	ctx.Data["branch_name"] = form.BranchName
	ctx.Data["datasetType"] = models.TypeCloudBrainTwo
	waitCount := cloudbrain.GetWaitingCloudbrainCount(models.TypeCloudBrainTwo, "")
	ctx.Data["WaitCount"] = waitCount
	setMultiNodeIfConfigureMatch(ctx)

	return nil
}

func TrainJobNewVersion(ctx *context.Context) {

err := trainJobNewVersionDataPrepare(ctx)
@@ -977,26 +926,18 @@ func trainJobNewVersionDataPrepare(ctx *context.Context) error {
ctx.Data["spec_id"] = spec.ID
}

var Parameters modelarts.Parameters
if err = json.Unmarshal([]byte(task.Parameters), &Parameters); err != nil {
ctx.ServerError("json.Unmarshal failed:", err)
return err
}
ctx.Data["params"] = Parameters.Parameter
ctx.Data["run_para_list"] = task.Parameters

branches, _, err := ctx.Repo.GitRepo.GetBranches(0, 0)
if err != nil {
ctx.ServerError("GetBranches error:", err)
return err
}
_, _, datasetNames, _, err := getDatasUrlListByUUIDS(task.Uuid)
if err != nil {
log.Info("query dataset error," + err.Error())
//ctx.ServerError("GetAllUserAttachments failed:", err)
//return err
} else {
ctx.Data["dataset_name"] = datasetNames
}

uuids, datasetNames := dataset.GetFilterDeletedAttachments(task.Uuid)

ctx.Data["dataset_name"] = datasetNames

ctx.Data["branches"] = branches
ctx.Data["branch_name"] = task.BranchName
ctx.Data["description"] = task.Description
@@ -1005,104 +946,24 @@ func trainJobNewVersionDataPrepare(ctx *context.Context) error {
ctx.Data["work_server_number"] = task.WorkServerNumber
ctx.Data["flavor_name"] = task.FlavorName
ctx.Data["engine_name"] = task.EngineName
ctx.Data["uuid"] = task.Uuid
ctx.Data["attachment"] = uuids
ctx.Data["flavor_code"] = task.FlavorCode
ctx.Data["engine_id"] = task.EngineID
ctx.Data["datasetType"] = models.TypeCloudBrainTwo

configList, err := getConfigList(modelarts.PerPage, 1, modelarts.SortByCreateTime, "desc", "", modelarts.ConfigTypeCustom)
if err != nil {
ctx.ServerError("getConfigList failed:", err)
return err
}
ctx.Data["config_list"] = configList.ParaConfigs
waitCount := cloudbrain.GetWaitingCloudbrainCount(models.TypeCloudBrainTwo, "")
ctx.Data["WaitCount"] = waitCount

return nil
}

func versionErrorDataPrepare(ctx *context.Context, form auth.CreateModelArtsTrainJobForm) error {
ctx.Data["PageIsCloudBrain"] = true
var jobID = ctx.Params(":jobid")
// var versionName = ctx.Params(":version-name")
var versionName = ctx.Query("version_name")

task, err := models.GetCloudbrainByJobIDAndVersionName(jobID, versionName)
if err != nil {
log.Error("GetCloudbrainByJobIDAndVersionName(%s) failed:%v", jobID, err.Error())
return err
}

t := time.Now()
var jobName = jobNamePrefixValid(cutString(ctx.User.Name, 5)) + t.Format("2006010215") + strconv.Itoa(int(t.Unix()))[5:]
ctx.Data["job_name"] = task.JobName

attachs, err := models.GetModelArtsTrainAttachments(ctx.User.ID)
if err != nil {
ctx.ServerError("GetAllUserAttachments failed:", err)
return err
}
ctx.Data["attachments"] = attachs

var resourcePools modelarts.ResourcePool
if err = json.Unmarshal([]byte(setting.ResourcePools), &resourcePools); err != nil {
ctx.ServerError("json.Unmarshal failed:", err)
return err
}
ctx.Data["resource_pools"] = resourcePools.Info

var engines modelarts.Engine
if err = json.Unmarshal([]byte(setting.Engines), &engines); err != nil {
ctx.ServerError("json.Unmarshal failed:", err)
return err
}
ctx.Data["engines"] = engines.Info

var versionInfos modelarts.VersionInfo
if err = json.Unmarshal([]byte(setting.EngineVersions), &versionInfos); err != nil {
ctx.ServerError("json.Unmarshal failed:", err)
return err
}
ctx.Data["engine_versions"] = versionInfos.Version

prepareCloudbrainTwoTrainSpecs(ctx)

var Parameters modelarts.Parameters
if err = json.Unmarshal([]byte(form.Params), &Parameters); err != nil {
ctx.ServerError("json.Unmarshal failed:", err)
return err
}
ctx.Data["params"] = Parameters.Parameter
//pretrain model
ctx.Data["model_name"] = task.ModelName
ctx.Data["model_version"] = task.ModelVersion
ctx.Data["ckpt_name"] = task.CkptName
ctx.Data["label_names"] = task.LabelName
ctx.Data["pre_train_model_url"] = task.PreTrainModelUrl

outputObsPath := "/" + setting.Bucket + modelarts.JobPath + jobName + modelarts.OutputPath
ctx.Data["train_url"] = outputObsPath

branches, _, err := ctx.Repo.GitRepo.GetBranches(0, 0)
if err != nil {
ctx.ServerError("GetBranches error:", err)
return err
}
ctx.Data["branches"] = branches
ctx.Data["description"] = form.Description
ctx.Data["dataset_name"] = task.DatasetName
ctx.Data["work_server_number"] = form.WorkServerNumber
ctx.Data["flavor_name"] = form.FlavorName
ctx.Data["engine_name"] = form.EngineName
ctx.Data["flavor_code"] = task.FlavorCode
ctx.Data["engine_id"] = task.EngineID
ctx.Data["version_name"] = form.VersionName

ctx.Data["bootFile"] = form.BootFile
ctx.Data["uuid"] = form.Attachment
ctx.Data["branch_name"] = form.BranchName
configList, err := getConfigList(modelarts.PerPage, 1, modelarts.SortByCreateTime, "desc", "", modelarts.ConfigTypeCustom)
if err != nil {
ctx.ServerError("getConfigList failed:", err)
return err
}
ctx.Data["config_list"] = configList.ParaConfigs
ctx.Data["datasetType"] = models.TypeCloudBrainTwo
waitCount := cloudbrain.GetWaitingCloudbrainCount(models.TypeCloudBrainTwo, "")
ctx.Data["WaitCount"] = waitCount

@@ -1136,21 +997,31 @@ func TrainJobCreate(ctx *context.Context, form auth.CreateModelArtsTrainJobForm)

errStr := checkMultiNode(ctx.User.ID, form.WorkServerNumber)
if errStr != "" {
trainJobErrorNewDataPrepare(ctx, form)
trainJobNewDataPrepare(ctx)
ctx.RenderWithErr(ctx.Tr(errStr), tplModelArtsTrainJobNew, &form)
return
}

lock := redis_lock.NewDistributeLock(redis_key.CloudbrainBindingJobNameKey(fmt.Sprint(repo.ID), string(models.JobTypeTrain), displayJobName))
isOk, err := lock.Lock(models.CloudbrainKeyDuration)
if !isOk {
log.Error("lock processed failed:%v", err, ctx.Data["MsgID"])
trainJobNewDataPrepare(ctx)
ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_samejob_err"), tplModelArtsTrainJobNew, &form)
return
}
defer lock.UnLock()

count, err := models.GetCloudbrainTrainJobCountByUserID(ctx.User.ID)
if err != nil {
log.Error("GetCloudbrainTrainJobCountByUserID failed:%v", err, ctx.Data["MsgID"])
trainJobErrorNewDataPrepare(ctx, form)
trainJobNewDataPrepare(ctx)
ctx.RenderWithErr("system error", tplModelArtsTrainJobNew, &form)
return
} else {
if count >= 1 {
log.Error("the user already has running or waiting task", ctx.Data["MsgID"])
trainJobErrorNewDataPrepare(ctx, form)
trainJobNewDataPrepare(ctx)
ctx.RenderWithErr("you have already a running or waiting task, can not create more", tplModelArtsTrainJobNew, &form)
return
}
@@ -1158,7 +1029,7 @@ func TrainJobCreate(ctx *context.Context, form auth.CreateModelArtsTrainJobForm)

if err := paramCheckCreateTrainJob(form); err != nil {
log.Error("paramCheckCreateTrainJob failed:(%v)", err)
trainJobErrorNewDataPrepare(ctx, form)
trainJobNewDataPrepare(ctx)
ctx.RenderWithErr(err.Error(), tplModelArtsTrainJobNew, &form)
return
}
@@ -1166,7 +1037,7 @@ func TrainJobCreate(ctx *context.Context, form auth.CreateModelArtsTrainJobForm)
bootFileExist, err := ctx.Repo.FileExists(bootFile, branchName)
if err != nil || !bootFileExist {
log.Error("Get bootfile error:", err)
trainJobErrorNewDataPrepare(ctx, form)
trainJobNewDataPrepare(ctx)
ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_bootfile_err"), tplModelArtsTrainJobNew, &form)
return
}
@@ -1177,23 +1048,30 @@ func TrainJobCreate(ctx *context.Context, form auth.CreateModelArtsTrainJobForm)
Cluster: models.OpenICluster,
AiCenterCode: models.AICenterOfCloudBrainTwo})
if err != nil || spec == nil {
trainJobErrorNewDataPrepare(ctx, form)
trainJobNewDataPrepare(ctx)
ctx.RenderWithErr("Resource specification not available", tplModelArtsTrainJobNew, &form)
return
}
if !account.IsPointBalanceEnough(ctx.User.ID, spec.UnitPrice*form.WorkServerNumber) {
log.Error("point balance is not enough,userId=%d specId=%d", ctx.User.ID, spec.ID)
trainJobNewDataPrepare(ctx)
ctx.RenderWithErr(ctx.Tr("points.insufficient_points_balance"), tplModelArtsTrainJobNew, &form)
return
}

//Determine whether the task name of the task in the project is duplicated
tasks, err := models.GetCloudbrainsByDisplayJobName(repo.ID, string(models.JobTypeTrain), displayJobName)
if err == nil {
if len(tasks) != 0 {
log.Error("the job name did already exist", ctx.Data["MsgID"])
trainJobErrorNewDataPrepare(ctx, form)
trainJobNewDataPrepare(ctx)
ctx.RenderWithErr("the job name did already exist", tplModelArtsTrainJobNew, &form)
return
}
} else {
if !models.IsErrJobNotExist(err) {
log.Error("system error, %v", err, ctx.Data["MsgID"])
trainJobErrorNewDataPrepare(ctx, form)
trainJobNewDataPrepare(ctx)
ctx.RenderWithErr("system error", tplModelArtsTrainJobNew, &form)
return
}
@@ -1210,7 +1088,7 @@ func TrainJobCreate(ctx *context.Context, form auth.CreateModelArtsTrainJobForm)

if err := downloadCode(repo, codeLocalPath, branchName); err != nil {
log.Error("downloadCode failed, server timed out: %s (%v)", repo.FullName(), err)
trainJobErrorNewDataPrepare(ctx, form)
trainJobNewDataPrepare(ctx)
ctx.RenderWithErr(ctx.Tr("cloudbrain.load_code_failed"), tplModelArtsTrainJobNew, &form)
return
}
@@ -1218,14 +1096,14 @@ func TrainJobCreate(ctx *context.Context, form auth.CreateModelArtsTrainJobForm)
//todo: upload code (send to file_server todo this work?)
if err := obsMkdir(setting.CodePathPrefix + jobName + modelarts.OutputPath + VersionOutputPath + "/"); err != nil {
log.Error("Failed to obsMkdir_output: %s (%v)", repo.FullName(), err)
trainJobErrorNewDataPrepare(ctx, form)
trainJobNewDataPrepare(ctx)
ctx.RenderWithErr("Failed to obsMkdir_output", tplModelArtsTrainJobNew, &form)
return
}

if err := obsMkdir(setting.CodePathPrefix + jobName + modelarts.LogPath + VersionOutputPath + "/"); err != nil {
log.Error("Failed to obsMkdir_log: %s (%v)", repo.FullName(), err)
trainJobErrorNewDataPrepare(ctx, form)
trainJobNewDataPrepare(ctx)
ctx.RenderWithErr("Failed to obsMkdir_log", tplModelArtsTrainJobNew, &form)
return
}
@@ -1234,7 +1112,7 @@ func TrainJobCreate(ctx *context.Context, form auth.CreateModelArtsTrainJobForm)
if err := uploadCodeToObs(codeLocalPath, jobName, ""); err != nil {
// if err := uploadCodeToObs(codeLocalPath, jobName, parentDir); err != nil {
log.Error("Failed to uploadCodeToObs: %s (%v)", repo.FullName(), err)
trainJobErrorNewDataPrepare(ctx, form)
trainJobNewDataPrepare(ctx)
ctx.RenderWithErr(ctx.Tr("cloudbrain.load_code_failed"), tplModelArtsTrainJobNew, &form)
return
}
@@ -1246,7 +1124,7 @@ func TrainJobCreate(ctx *context.Context, form auth.CreateModelArtsTrainJobForm)
err := json.Unmarshal([]byte(params), &parameters)
if err != nil {
log.Error("Failed to Unmarshal params: %s (%v)", params, err)
trainJobErrorNewDataPrepare(ctx, form)
trainJobNewDataPrepare(ctx)
ctx.RenderWithErr("运行参数错误", tplModelArtsTrainJobNew, &form)
return
}
@@ -1272,7 +1150,7 @@ func TrainJobCreate(ctx *context.Context, form auth.CreateModelArtsTrainJobForm)
datasUrlList, dataUrl, datasetNames, isMultiDataset, err := getDatasUrlListByUUIDS(uuid)
if err != nil {
log.Error("Failed to getDatasUrlListByUUIDS: %v", err)
trainJobErrorNewDataPrepare(ctx, form)
trainJobNewDataPrepare(ctx)
ctx.RenderWithErr("Failed to getDatasUrlListByUUIDS:"+err.Error(), tplModelArtsTrainJobNew, &form)
return
}
@@ -1280,7 +1158,7 @@ func TrainJobCreate(ctx *context.Context, form auth.CreateModelArtsTrainJobForm)
jsondatas, err := json.Marshal(datasUrlList)
if err != nil {
log.Error("Failed to Marshal: %v", err)
trainJobErrorNewDataPrepare(ctx, form)
trainJobNewDataPrepare(ctx)
ctx.RenderWithErr("json error:"+err.Error(), tplModelArtsTrainJobNew, &form)
return
}
@@ -1290,6 +1168,13 @@ func TrainJobCreate(ctx *context.Context, form auth.CreateModelArtsTrainJobForm)
Value: string(jsondatas),
})
}
if form.ModelName != "" { //使用预训练模型训练
ckptUrl := "/" + form.PreTrainModelUrl + form.CkptName
param = append(param, models.Parameter{
Label: modelarts.CkptUrl,
Value: "s3:/" + ckptUrl,
})
}

//save param config
// if isSaveParam == "on" {
@@ -1358,6 +1243,15 @@ func TrainJobCreate(ctx *context.Context, form auth.CreateModelArtsTrainJobForm)
DatasetName: datasetNames,
Spec: spec,
}
if form.ModelName != "" { //使用预训练模型训练
req.ModelName = form.ModelName
req.LabelName = form.LabelName
req.CkptName = form.CkptName
req.ModelVersion = form.ModelVersion
req.PreTrainModelUrl = form.PreTrainModelUrl

}

userCommand, userImageUrl := getUserCommand(engineID, req)
req.UserCommand = userCommand
req.UserImageUrl = userImageUrl
@@ -1372,7 +1266,7 @@ func TrainJobCreate(ctx *context.Context, form auth.CreateModelArtsTrainJobForm)
err = modelarts.GenerateTrainJob(ctx, req)
if err != nil {
log.Error("GenerateTrainJob failed:%v", err.Error())
trainJobErrorNewDataPrepare(ctx, form)
trainJobNewDataPrepare(ctx)
ctx.RenderWithErr(err.Error(), tplModelArtsTrainJobNew, &form)
return
}
@@ -1457,7 +1351,7 @@ func TrainJobCreateVersion(ctx *context.Context, form auth.CreateModelArtsTrainJ

errStr := checkMultiNode(ctx.User.ID, form.WorkServerNumber)
if errStr != "" {
versionErrorDataPrepare(ctx, form)
trainJobNewVersionDataPrepare(ctx)
ctx.RenderWithErr(ctx.Tr(errStr), tplModelArtsTrainJobVersionNew, &form)
return
}
@@ -1465,13 +1359,13 @@ func TrainJobCreateVersion(ctx *context.Context, form auth.CreateModelArtsTrainJ
count, err := models.GetCloudbrainTrainJobCountByUserID(ctx.User.ID)
if err != nil {
log.Error("GetCloudbrainTrainJobCountByUserID failed:%v", err, ctx.Data["MsgID"])
versionErrorDataPrepare(ctx, form)
trainJobNewVersionDataPrepare(ctx)
ctx.RenderWithErr("system error", tplModelArtsTrainJobVersionNew, &form)
return
} else {
if count >= 1 {
log.Error("the user already has running or waiting task", ctx.Data["MsgID"])
versionErrorDataPrepare(ctx, form)
trainJobNewVersionDataPrepare(ctx)
ctx.RenderWithErr("you have already a running or waiting task, can not create more", tplModelArtsTrainJobVersionNew, &form)
return
}
@@ -1506,16 +1400,26 @@ func TrainJobCreateVersion(ctx *context.Context, form auth.CreateModelArtsTrainJ
EngineName := form.EngineName
isLatestVersion := modelarts.IsLatestVersion

lock := redis_lock.NewDistributeLock(redis_key.CloudbrainBindingJobNameKey(fmt.Sprint(repo.ID), string(models.JobTypeTrain), displayJobName))
isOk, err := lock.Lock(models.CloudbrainKeyDuration)
if !isOk {
log.Error("lock processed failed:%v", err, ctx.Data["MsgID"])
trainJobNewVersionDataPrepare(ctx)
ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_samejob_err"), tplModelArtsTrainJobVersionNew, &form)
return
}
defer lock.UnLock()

canNewJob, _ := canUserCreateTrainJobVersion(ctx, latestTask.UserID)
if !canNewJob {
versionErrorDataPrepare(ctx, form)
trainJobNewVersionDataPrepare(ctx)
ctx.RenderWithErr("user cann't new trainjob", tplModelArtsTrainJobVersionNew, &form)
return
}

if err := paramCheckCreateTrainJob(form); err != nil {
log.Error("paramCheckCreateTrainJob failed:(%v)", err)
versionErrorDataPrepare(ctx, form)
trainJobNewVersionDataPrepare(ctx)
ctx.RenderWithErr(err.Error(), tplModelArtsTrainJobVersionNew, &form)
return
}
@@ -1523,7 +1427,7 @@ func TrainJobCreateVersion(ctx *context.Context, form auth.CreateModelArtsTrainJ
bootFileExist, err := ctx.Repo.FileExists(bootFile, branchName)
if err != nil || !bootFileExist {
log.Error("Get bootfile error:", err)
versionErrorDataPrepare(ctx, form)
trainJobNewVersionDataPrepare(ctx)
ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_bootfile_err"), tplModelArtsTrainJobVersionNew, &form)
return
}
@@ -1534,10 +1438,16 @@ func TrainJobCreateVersion(ctx *context.Context, form auth.CreateModelArtsTrainJ
Cluster: models.OpenICluster,
AiCenterCode: models.AICenterOfCloudBrainTwo})
if err != nil || spec == nil {
versionErrorDataPrepare(ctx, form)
trainJobNewVersionDataPrepare(ctx)
ctx.RenderWithErr("Resource specification not available", tplModelArtsTrainJobVersionNew, &form)
return
}
if !account.IsPointBalanceEnough(ctx.User.ID, spec.UnitPrice) {
log.Error("point balance is not enough,userId=%d specId=%d", ctx.User.ID, spec.ID)
trainJobNewVersionDataPrepare(ctx)
ctx.RenderWithErr(ctx.Tr("points.insufficient_points_balance"), tplModelArtsTrainJobVersionNew, &form)
return
}

//todo: del the codeLocalPath
_, err = ioutil.ReadDir(codeLocalPath)
@@ -1549,7 +1459,7 @@ func TrainJobCreateVersion(ctx *context.Context, form auth.CreateModelArtsTrainJ
commitID, _ := gitRepo.GetBranchCommitID(branchName)
if err := downloadCode(repo, codeLocalPath, branchName); err != nil {
log.Error("Failed git clone repo to local(!: %s (%v)", repo.FullName(), err)
versionErrorDataPrepare(ctx, form)
trainJobNewVersionDataPrepare(ctx)
ctx.RenderWithErr(ctx.Tr("cloudbrain.load_code_failed"), tplModelArtsTrainJobVersionNew, &form)
return
}
@@ -1557,14 +1467,14 @@ func TrainJobCreateVersion(ctx *context.Context, form auth.CreateModelArtsTrainJ
//todo: upload code (send to file_server todo this work?)
if err := obsMkdir(setting.CodePathPrefix + jobName + modelarts.OutputPath + VersionOutputPath + "/"); err != nil {
log.Error("Failed to obsMkdir_output: %s (%v)", repo.FullName(), err)
versionErrorDataPrepare(ctx, form)
trainJobNewVersionDataPrepare(ctx)
ctx.RenderWithErr("Failed to obsMkdir_output", tplModelArtsTrainJobVersionNew, &form)
return
}

if err := obsMkdir(setting.CodePathPrefix + jobName + modelarts.LogPath + VersionOutputPath + "/"); err != nil {
log.Error("Failed to obsMkdir_log: %s (%v)", repo.FullName(), err)
versionErrorDataPrepare(ctx, form)
trainJobNewVersionDataPrepare(ctx)
ctx.RenderWithErr("Failed to obsMkdir_log", tplModelArtsTrainJobVersionNew, &form)
return
}
@@ -1574,7 +1484,7 @@ func TrainJobCreateVersion(ctx *context.Context, form auth.CreateModelArtsTrainJ
// if err := uploadCodeToObs(codeLocalPath, jobName, ""); err != nil {
if err := uploadCodeToObs(codeLocalPath, jobName, parentDir); err != nil {
log.Error("Failed to uploadCodeToObs: %s (%v)", repo.FullName(), err)
versionErrorDataPrepare(ctx, form)
trainJobNewVersionDataPrepare(ctx)
ctx.RenderWithErr(ctx.Tr("cloudbrain.load_code_failed"), tplModelArtsTrainJobVersionNew, &form)
return
}
@@ -1588,7 +1498,7 @@ func TrainJobCreateVersion(ctx *context.Context, form auth.CreateModelArtsTrainJ
err := json.Unmarshal([]byte(params), &parameters)
if err != nil {
log.Error("Failed to Unmarshal params: %s (%v)", params, err)
versionErrorDataPrepare(ctx, form)
trainJobNewVersionDataPrepare(ctx)
ctx.RenderWithErr("运行参数错误", tplModelArtsTrainJobVersionNew, &form)
return
}
@@ -1614,7 +1524,7 @@ func TrainJobCreateVersion(ctx *context.Context, form auth.CreateModelArtsTrainJ
datasUrlList, dataUrl, datasetNames, isMultiDataset, err := getDatasUrlListByUUIDS(uuid)
if err != nil {
log.Error("Failed to getDatasUrlListByUUIDS: %v", err)
versionErrorDataPrepare(ctx, form)
trainJobNewVersionDataPrepare(ctx)
ctx.RenderWithErr("Failed to getDatasUrlListByUUIDS:"+err.Error(), tplModelArtsTrainJobVersionNew, &form)
return
}
@@ -1622,7 +1532,7 @@ func TrainJobCreateVersion(ctx *context.Context, form auth.CreateModelArtsTrainJ
jsondatas, err := json.Marshal(datasUrlList)
if err != nil {
log.Error("Failed to Marshal: %v", err)
versionErrorDataPrepare(ctx, form)
trainJobNewVersionDataPrepare(ctx)
ctx.RenderWithErr("json error:"+err.Error(), tplModelArtsTrainJobVersionNew, &form)
return
}
@@ -1633,46 +1543,13 @@ func TrainJobCreateVersion(ctx *context.Context, form auth.CreateModelArtsTrainJ
})
}

// //save param config
// if isSaveParam == "on" {
// saveparams := append(param, models.Parameter{
// Label: modelarts.TrainUrl,
// Value: outputObsPath,
// }, models.Parameter{
// Label: modelarts.DataUrl,
// Value: dataPath,
// })
// if form.ParameterTemplateName == "" {
// log.Error("ParameterTemplateName is empty")
// versionErrorDataPrepare(ctx, form)
// ctx.RenderWithErr("保存作业参数时,作业参数名称不能为空", tplModelArtsTrainJobVersionNew, &form)
// return
// }

// _, err := modelarts.CreateTrainJobConfig(models.CreateConfigParams{
// ConfigName: form.ParameterTemplateName,
// Description: form.PrameterDescription,
// DataUrl: dataPath,
// AppUrl: codeObsPath,
// BootFileUrl: codeObsPath + bootFile,
// TrainUrl: outputObsPath,
// Flavor: models.Flavor{
// Code: flavorCode,
// },
// WorkServerNum: workServerNumber,
// EngineID: int64(engineID),
// LogUrl: logObsPath,
// PoolID: poolID,
// Parameter: saveparams,
// })

// if err != nil {
// log.Error("Failed to CreateTrainJobConfig: %v", err)
// versionErrorDataPrepare(ctx, form)
// ctx.RenderWithErr("保存作业参数失败:"+err.Error(), tplModelArtsTrainJobVersionNew, &form)
// return
// }
// }
if form.ModelName != "" { //使用预训练模型训练
ckptUrl := "/" + form.PreTrainModelUrl + form.CkptName
param = append(param, models.Parameter{
Label: modelarts.CkptUrl,
Value: "s3:/" + ckptUrl,
})
}

task, err := models.GetCloudbrainByJobIDAndVersionName(jobID, PreVersionName)
if err != nil {
@@ -1707,6 +1584,15 @@ func TrainJobCreateVersion(ctx *context.Context, form auth.CreateModelArtsTrainJ
DatasetName: datasetNames,
Spec: spec,
}

if form.ModelName != "" { //使用预训练模型训练
req.ModelName = form.ModelName
req.LabelName = form.LabelName
req.CkptName = form.CkptName
req.ModelVersion = form.ModelVersion
req.PreTrainModelUrl = form.PreTrainModelUrl

}
userCommand, userImageUrl := getUserCommand(engineID, req)
req.UserCommand = userCommand
req.UserImageUrl = userImageUrl
@@ -1714,7 +1600,7 @@ func TrainJobCreateVersion(ctx *context.Context, form auth.CreateModelArtsTrainJ
err = modelarts.GenerateTrainJobVersion(ctx, req, jobID)
if err != nil {
log.Error("GenerateTrainJob failed:%v", err.Error())
versionErrorDataPrepare(ctx, form)
trainJobNewVersionDataPrepare(ctx)
ctx.RenderWithErr(err.Error(), tplModelArtsTrainJobVersionNew, &form)
return
}
@@ -2003,7 +1889,6 @@ func TrainJobStop(ctx *context.Context) {
ctx.RenderWithErr(err.Error(), tplModelArtsTrainJobIndex, nil)
return
}

ctx.Redirect(setting.AppSubURL + ctx.Repo.RepoLink + "/modelarts/train-job?listType=" + listType)
}

@@ -2112,6 +1997,16 @@ func InferenceJobCreate(ctx *context.Context, form auth.CreateModelArtsInference
return
}

lock := redis_lock.NewDistributeLock(redis_key.CloudbrainBindingJobNameKey(fmt.Sprint(repo.ID), string(models.JobTypeInference), displayJobName))
isOk, err := lock.Lock(models.CloudbrainKeyDuration)
if !isOk {
log.Error("lock processed failed:%v", err, ctx.Data["MsgID"])
inferenceJobErrorNewDataPrepare(ctx, form)
ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_samejob_err"), tplModelArtsInferenceJobNew, &form)
return
}
defer lock.UnLock()

count, err := models.GetCloudbrainInferenceJobCountByUserID(ctx.User.ID)
if err != nil {
log.Error("GetCloudbrainInferenceJobCountByUserID failed:%v", err, ctx.Data["MsgID"])
@@ -2170,6 +2065,13 @@ func InferenceJobCreate(ctx *context.Context, form auth.CreateModelArtsInference
ctx.RenderWithErr("Resource specification not available", tplModelArtsInferenceJobNew, &form)
return
}
if !account.IsPointBalanceEnough(ctx.User.ID, spec.UnitPrice) {
log.Error("point balance is not enough,userId=%d specId=%d ", ctx.User.ID, spec.ID)
inferenceJobErrorNewDataPrepare(ctx, form)
ctx.RenderWithErr(ctx.Tr("points.insufficient_points_balance"), tplModelArtsInferenceJobNew, &form)
return
}

//todo: del the codeLocalPath
_, err = ioutil.ReadDir(codeLocalPath)
if err == nil {
@@ -2419,6 +2321,7 @@ func InferenceJobIndex(ctx *context.Context) {
RepoID: repoId,
Type: Type,
New: MODEL_LATEST,
Status: 0,
})
ctx.Data["MODEL_COUNT"] = model_count

@@ -2499,6 +2402,7 @@ func inferenceJobNewDataPrepare(ctx *context.Context) error {
RepoID: repoId,
Type: Type,
New: MODEL_LATEST,
Status: 0,
})
ctx.Data["MODEL_COUNT"] = model_count
ctx.Data["datasetType"] = models.TypeCloudBrainTwo


+ 440
- 0
routers/repo/user_invitation.go View File

@@ -0,0 +1,440 @@
package repo

import (
"fmt"
"net/http"
"net/url"
"sort"
"time"

"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
"github.com/360EntSecGroup-Skylar/excelize/v2"
)

// QueryInvitationCurrentMonth returns invitation statistics for the current
// month, served from the pre-aggregated static analysis table.
func QueryInvitationCurrentMonth(ctx *context.Context) {
	queryDataFromStaticTable(ctx, "public.user_business_analysis_current_month", new(models.UserBusinessAnalysisCurrentMonth))
}

// getInvitationExcelHeader builds the header row for the invitation summary
// sheet, mapping cell coordinates (e.g. "A1") to localized column titles.
func getInvitationExcelHeader(ctx *context.Context) map[string]string {
	titles := []string{
		ctx.Tr("user.static.id"),
		ctx.Tr("user.static.name"),
		ctx.Tr("user.static.invitationNum"),
		ctx.Tr("user.static.phone"),
		ctx.Tr("user.static.registdate"),
	}
	header := make(map[string]string, len(titles))
	for idx, title := range titles {
		// All header cells live on row 1; columns advance A, B, C, ...
		header[getColumn(byte(idx))+fmt.Sprint(1)] = title
	}
	return header
}

// getInvitationDetailExcelHeader builds the header row for the invitation
// detail sheet, mapping cell coordinates (e.g. "A1") to localized titles.
func getInvitationDetailExcelHeader(ctx *context.Context) map[string]string {
	titles := []string{
		ctx.Tr("user.static.id"),
		ctx.Tr("user.static.name"),
		ctx.Tr("user.static.srcUserId"),
		ctx.Tr("user.static.phone"),
		ctx.Tr("user.static.registdate"),
	}
	header := make(map[string]string, len(titles))
	for idx, title := range titles {
		// All header cells live on row 1; columns advance A, B, C, ...
		header[getColumn(byte(idx))+fmt.Sprint(1)] = title
	}
	return header
}

// writeInvitationExcel writes one summary record (id, name, invitation count,
// phone, registration date) into the given sheet row.
func writeInvitationExcel(row int, xlsx *excelize.File, sheetName string, userRecord *models.UserBusinessAnalysisAll) {
	rowLabel := fmt.Sprint(row)
	col := byte(0)
	// set writes v into the current column of this row, then advances.
	set := func(v interface{}) {
		xlsx.SetCellValue(sheetName, getColumn(col)+rowLabel, v)
		col++
	}
	set(userRecord.ID)
	set(userRecord.Name)
	set(userRecord.InvitationUserNum)
	set(userRecord.Phone)
	// Trim the trailing ":ss" so the cell shows minute precision.
	registered := userRecord.RegistDate.Format("2006-01-02 15:04:05")
	set(registered[:len(registered)-3])
}

// writeInvitationDetailExcel writes one invitation detail record (invited user
// id, name, inviter id, phone, creation date) into the given sheet row.
func writeInvitationDetailExcel(row int, xlsx *excelize.File, sheetName string, userRecord *models.Invitation) {
	rowLabel := fmt.Sprint(row)
	col := byte(0)
	// set writes v into the current column of this row, then advances.
	set := func(v interface{}) {
		xlsx.SetCellValue(sheetName, getColumn(col)+rowLabel, v)
		col++
	}
	set(userRecord.UserID)
	set(userRecord.Name)
	set(userRecord.SrcUserID)
	set(userRecord.Phone)
	// Trim the trailing ":ss" so the cell shows minute precision.
	created := userRecord.CreatedUnix.Format("2006-01-02 15:04:05")
	set(created[:len(created)-3])
}

// DownloadInvitationDetail streams every invitation record as an xlsx
// attachment. Records are fetched page by page; inviter names are resolved
// through a preloaded id->name map.
func DownloadInvitationDetail(ctx *context.Context) {
	workbook := excelize.NewFile()
	sheetName := ctx.Tr("user.static.invitationdetailsheetname")
	sheetIndex := workbook.NewSheet(sheetName)
	workbook.DeleteSheet("Sheet1")
	for cell, title := range getInvitationDetailExcelHeader(ctx) {
		// Write the localized header row.
		workbook.SetCellValue(sheetName, cell, title)
	}
	userNameMap := models.GetAllUserName()
	// Probe once with a minimal page just to learn the total record count.
	_, count := models.QueryInvitaionPage(1, 1)
	row := 1
	for offset := int64(0); ; {
		records, _ := models.QueryInvitaionPage(int(offset), PAGE_SIZE)
		log.Info("return count=" + fmt.Sprint(count))
		for _, record := range records {
			row++
			record.Name = userNameMap[record.UserID]
			if record.Name == "" {
				// The user no longer exists; mark as deregistered.
				record.Name = "已注销"
			}
			writeInvitationDetailExcel(row, workbook, sheetName, record)
		}
		offset += PAGE_SIZE
		if offset >= count {
			break
		}
	}
	// Make the generated sheet the one opened by default.
	workbook.SetActiveSheet(sheetIndex)
	filename := sheetName + ".xlsx"
	ctx.Resp.Header().Set("Content-Disposition", "attachment; filename="+url.QueryEscape(filename))
	ctx.Resp.Header().Set("Content-Type", "application/octet-stream")
	if _, err := workbook.WriteTo(ctx.Resp); err != nil {
		log.Info("writer exel error." + err.Error())
	}
}

// queryDataFromStaticTable serves invitation statistics from one of the
// pre-aggregated user-business-analysis tables. With "IsReturnFile" set it
// streams the full table as an xlsx attachment (ignoring the userName
// filter); otherwise it returns one JSON page filtered by "userName".
//
// NOTE(review): the first argument to QueryUserInvitationDataByTableName is
// passed as 1 for the count probe but as a running record offset in the
// export loop, and as (page-1)*pageSize in the JSON branch — presumably it
// is an offset and the probe value is irrelevant for counting; confirm
// against the model implementation.
func queryDataFromStaticTable(ctx *context.Context, tableName string, queryObj interface{}) {
	page, pageSize := getPageInfo(ctx)
	userName := ctx.Query("userName")
	IsReturnFile := ctx.QueryBool("IsReturnFile")

	if IsReturnFile {
		// Export branch: write the whole table to an excel file.
		xlsx := excelize.NewFile()
		sheetName := ctx.Tr("user.static.invitationsheetname")
		index := xlsx.NewSheet(sheetName)
		xlsx.DeleteSheet("Sheet1")
		excelHeader := getInvitationExcelHeader(ctx)
		for k, v := range excelHeader {
			// Set the localized header cell value.
			xlsx.SetCellValue(sheetName, k, v)
		}
		// Probe for the total count, then page through all records.
		_, count := models.QueryUserInvitationDataByTableName(1, 1, tableName, queryObj, "", 1)
		var indexTotal int64
		indexTotal = 0
		row := 1
		for {
			re, _ := models.QueryUserInvitationDataByTableName(int(indexTotal), PAGE_SIZE, tableName, queryObj, "", 1)
			log.Info("return count=" + fmt.Sprint(count))
			for _, userRecord := range re {
				row++
				writeInvitationExcel(row, xlsx, sheetName, userRecord)
			}
			indexTotal += PAGE_SIZE
			if indexTotal >= count {
				break
			}
		}
		// Make the generated sheet the one opened by default.
		xlsx.SetActiveSheet(index)
		filename := sheetName + "_" + ctx.Tr("user.static."+tableName) + ".xlsx"
		ctx.Resp.Header().Set("Content-Disposition", "attachment; filename="+url.QueryEscape(filename))
		ctx.Resp.Header().Set("Content-Type", "application/octet-stream")
		if _, err := xlsx.WriteTo(ctx.Resp); err != nil {
			log.Info("writer exel error." + err.Error())
		}
	} else {
		// JSON branch: return one page, optionally filtered by userName.
		resultRecord, count := models.QueryUserInvitationDataByTableName((page-1)*pageSize, pageSize, tableName, queryObj, userName, 1)
		result := make([]models.Invitation, 0)
		for _, record := range resultRecord {
			// Re-shape the analysis row into the Invitation view model.
			invi := models.Invitation{
				SrcUserID:         record.ID,
				Name:              record.Name,
				InvitationUserNum: record.InvitationUserNum,
				Phone:             record.Phone,
				CreatedUnix:       record.RegistDate,
			}
			result = append(result, invi)
		}
		mapInterface := make(map[string]interface{})
		mapInterface["data"] = result
		mapInterface["count"] = count
		ctx.JSON(http.StatusOK, mapInterface)
	}
}

// QueryInvitationCurrentWeek returns invitation statistics for the current
// week, served from the pre-aggregated static analysis table.
func QueryInvitationCurrentWeek(ctx *context.Context) {
	queryDataFromStaticTable(ctx, "public.user_business_analysis_current_week", new(models.UserBusinessAnalysisCurrentWeek))
}

// QueryInvitationLastWeek returns invitation statistics for the previous
// week, served from the pre-aggregated static analysis table.
func QueryInvitationLastWeek(ctx *context.Context) {
	queryDataFromStaticTable(ctx, "public.user_business_analysis_last_week", new(models.UserBusinessAnalysisLastWeek))
}

// QueryInvitationCurrentYear returns invitation statistics for the current
// year, served from the pre-aggregated static analysis table.
func QueryInvitationCurrentYear(ctx *context.Context) {
	queryDataFromStaticTable(ctx, "public.user_business_analysis_current_year", new(models.UserBusinessAnalysisCurrentYear))
}

// QueryInvitationLast30Day returns invitation statistics for the last 30
// days, served from the pre-aggregated static analysis table.
func QueryInvitationLast30Day(ctx *context.Context) {
	queryDataFromStaticTable(ctx, "public.user_business_analysis_last30_day", new(models.UserBusinessAnalysisLast30Day))
}

// QueryInvitationLastMonth returns invitation statistics for the previous
// month, served from the pre-aggregated static analysis table.
func QueryInvitationLastMonth(ctx *context.Context) {
	queryDataFromStaticTable(ctx, "public.user_business_analysis_last_month", new(models.UserBusinessAnalysisLastMonth))
}

// QueryInvitationYesterday returns invitation statistics for yesterday,
// served from the pre-aggregated static analysis table.
func QueryInvitationYesterday(ctx *context.Context) {
	queryDataFromStaticTable(ctx, "public.user_business_analysis_yesterday", new(models.UserBusinessAnalysisYesterday))
}

// QueryInvitationAll returns all-time invitation statistics, served from the
// pre-aggregated static analysis table.
func QueryInvitationAll(ctx *context.Context) {
	queryDataFromStaticTable(ctx, "public.user_business_analysis_all", new(models.UserBusinessAnalysisAll))
}

// QueryUserDefineInvitationPage serves invitation statistics for a caller
// supplied date range ("startDate"/"endDate", format 2006-01-02, local time).
// NOTE(review): parse errors are deliberately ignored; an invalid or missing
// date falls back to the zero time — confirm this best-effort behavior is
// intended.
func QueryUserDefineInvitationPage(ctx *context.Context) {
	const layout = "2006-01-02"
	startTime, _ := time.ParseInLocation(layout, ctx.Query("startDate"), time.Local)
	endTime, _ := time.ParseInLocation(layout, ctx.Query("endDate"), time.Local)
	queryData(ctx, startTime, endTime)
}

// queryData aggregates invitation records created between startTime and
// endTime, grouped by inviting user (SrcUserID) and ranked by invitation
// count in descending order. With the "IsReturnFile" query flag set it
// streams an xlsx export of the whole ranking; otherwise it returns one JSON
// page ("data" + total "count") according to the request's page parameters.
func queryData(ctx *context.Context, startTime time.Time, endTime time.Time) {
	page, pageSize := getPageInfo(ctx)
	IsReturnFile := ctx.QueryBool("IsReturnFile")

	dbResult := models.QueryInvitaionByTime(startTime.Unix(), endTime.Unix())

	// Tally the number of invitations per inviter.
	invitaionNumMap := make(map[int64]int, 0)
	allUserIds := make([]int64, 0)
	for _, record := range dbResult {
		invitaionNumMap[record.SrcUserID]++
	}
	invitaionNumList := make([]models.Invitation, 0)
	for key, value := range invitaionNumMap {
		invi := models.Invitation{
			SrcUserID:         key,
			InvitationUserNum: value,
		}
		invitaionNumList = append(invitaionNumList, invi)
		allUserIds = append(allUserIds, key)
	}
	// Rank inviters by invitation count, highest first.
	sort.Slice(invitaionNumList, func(i, j int) bool {
		return invitaionNumList[i].InvitationUserNum > invitaionNumList[j].InvitationUserNum
	})
	if IsReturnFile {
		xlsx := excelize.NewFile()
		sheetName := ctx.Tr("user.static.invitationsheetname")
		index := xlsx.NewSheet(sheetName)
		xlsx.DeleteSheet("Sheet1")
		excelHeader := getInvitationExcelHeader(ctx)
		for k, v := range excelHeader {
			// Write the localized header row.
			xlsx.SetCellValue(sheetName, k, v)
		}
		// Resolve user details in batches of 100 to bound each query.
		end := 100
		userMap := make(map[int64]*models.User, 0)
		log.Info("len(allUserIds)=" + fmt.Sprint(len(allUserIds)))
		for i := 0; i < len(allUserIds); i += 100 {
			if end >= len(allUserIds) {
				end = len(allUserIds)
			}
			log.Info("i=" + fmt.Sprint(i) + " end=" + fmt.Sprint(end))
			if i == end {
				break
			}
			userList, err := models.GetUsersByIDs(allUserIds[i:end])
			if err == nil {
				for _, tmp := range userList {
					userMap[tmp.ID] = tmp
				}
			}
			end = end + 100
		}
		row := 1
		log.Info("len(userMap)=" + fmt.Sprint(len(userMap)))
		for _, userRecord := range invitaionNumList {
			row++
			rows := fmt.Sprint(row)
			var tmp byte
			tmp = 0

			xlsx.SetCellValue(sheetName, getColumn(tmp)+rows, userRecord.SrcUserID)
			tmp = tmp + 1
			// Users that can no longer be resolved are shown as deregistered.
			name := "已注销"
			if userMap[userRecord.SrcUserID] != nil {
				name = userMap[userRecord.SrcUserID].Name
			}
			xlsx.SetCellValue(sheetName, getColumn(tmp)+rows, name)
			tmp = tmp + 1

			xlsx.SetCellValue(sheetName, getColumn(tmp)+rows, userRecord.InvitationUserNum)
			tmp = tmp + 1
			Phone := ""
			if userMap[userRecord.SrcUserID] != nil {
				Phone = userMap[userRecord.SrcUserID].PhoneNumber
			}
			xlsx.SetCellValue(sheetName, getColumn(tmp)+rows, Phone)
			tmp = tmp + 1

			formatTime := ""
			if userMap[userRecord.SrcUserID] != nil {
				// Trim the trailing ":ss" so the cell shows minute precision.
				formatTime = userMap[userRecord.SrcUserID].CreatedUnix.Format("2006-01-02 15:04:05")
				formatTime = formatTime[0 : len(formatTime)-3]
			}
			xlsx.SetCellValue(sheetName, getColumn(tmp)+rows, formatTime)
		}
		// Make the generated sheet the one opened by default.
		xlsx.SetActiveSheet(index)
		filename := sheetName + "_" + getTimeFileName(startTime) + "_" + getTimeFileName(endTime) + ".xlsx"
		ctx.Resp.Header().Set("Content-Disposition", "attachment; filename="+url.QueryEscape(filename))
		ctx.Resp.Header().Set("Content-Type", "application/octet-stream")
		if _, err := xlsx.WriteTo(ctx.Resp); err != nil {
			log.Info("writer exel error." + err.Error())
		}
	} else {
		// Slice one page out of the ranked list.
		result := make([]*models.Invitation, 0)
		userIds := make([]int64, 0)
		end := len(invitaionNumList) - 1
		for start := (page - 1) * pageSize; start <= end; start++ {
			invi := invitaionNumList[start]
			result = append(result, &invi)
			userIds = append(userIds, invi.SrcUserID)
			if len(result) == pageSize {
				break
			}
		}
		userList, err := models.GetUsersByIDs(userIds)
		if err == nil {
			for _, invi := range result {
				// Look up the matching user explicitly. The previous code
				// seeded tmpUser with userList[0], which panicked when the
				// lookup returned no rows (e.g. all inviters deleted).
				var matched *models.User
				for _, tmp := range userList {
					if tmp.ID == invi.SrcUserID {
						matched = tmp
						break
					}
				}
				if matched != nil {
					invi.Name = matched.Name
					invi.Phone = matched.PhoneNumber
					invi.CreatedUnix = matched.CreatedUnix
				} else {
					invi.Name = "已注销"
				}
			}
		} else {
			log.Info("query user error." + err.Error())
		}
		mapInterface := make(map[string]interface{})
		mapInterface["data"] = result
		mapInterface["count"] = len(invitaionNumList)
		ctx.JSON(http.StatusOK, mapInterface)
	}
}

// getPageInfo reads the "page" and "pageSize" query parameters from the
// request, falling back to page 1 and the configured issue paging size
// when a value is missing or non-positive.
func getPageInfo(ctx *context.Context) (int, int) {
	page, pageSize := ctx.QueryInt("page"), ctx.QueryInt("pageSize")
	if page < 1 {
		page = 1
	}
	if pageSize < 1 {
		pageSize = setting.UI.IssuePagingNum
	}
	return page, pageSize
}

+ 24
- 0
routers/reward/point/account.go View File

@@ -0,0 +1,24 @@
package point

import (
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/routers/response"
"code.gitea.io/gitea/services/reward/point/account"
"net/http"
)

// SearchPointAccount handles the point-account search API for admins.
// It reads the keyword "q" and "page" query parameters, searches with a
// fixed page size of 20, and writes the result (or a server error) as JSON.
func SearchPointAccount(ctx *context.Context) {
	q := ctx.Query("q")
	page := ctx.QueryInt("page")
	// Named "res" (not "response") to avoid shadowing the response package;
	// the original misspelled it "resopnse" for the same reason.
	res, err := account.SearchPointAccount(models.SearchPointAccountOpts{ListOptions: models.ListOptions{Page: page, PageSize: 20}, Keyword: q})
	if err != nil {
		log.Error("SearchPointAccount error.%v", err)
		ctx.JSON(http.StatusOK, response.ServerError(err.Error()))
		return
	}

	ctx.JSON(http.StatusOK, response.SuccessWithData(res))
}

+ 45
- 0
routers/reward/point/limit.go View File

@@ -0,0 +1,45 @@
package point

import (
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/routers/response"
"code.gitea.io/gitea/services/reward/limiter"
"net/http"
)

// GetSingleDailyPointLimitConfig returns the configured per-user daily
// point limit as JSON. When no config exists, "LimitNum" is an empty
// string so the caller can distinguish "unset" from zero.
func GetSingleDailyPointLimitConfig(ctx *context.Context) {
	cfg, err := limiter.GetSingleDailyPointLimitConfig()
	if err != nil {
		ctx.JSON(http.StatusOK, response.ServerError(err.Error()))
		return
	}
	data := map[string]interface{}{}
	if cfg != nil {
		data["LimitNum"] = cfg.LimitNum
	} else {
		data["LimitNum"] = ""
	}
	ctx.JSON(http.StatusOK, response.SuccessWithData(data))
}

// SetSingleDailyPointLimitConfig stores a new per-user daily point limit,
// recording the current user as the operator.
func SetSingleDailyPointLimitConfig(ctx *context.Context, config models.LimitConfigVO) {
	if err := limiter.SetSingleDailyPointLimitConfig(config.LimitNum, ctx.User); err != nil {
		log.Error("Set single daily point limit config error. %v", err)
		ctx.JSON(http.StatusOK, response.ServerError(err.Error()))
		return
	}
	ctx.JSON(http.StatusOK, response.Success())
}

// DeletePointLimitConfig deletes the limit config identified by the "id"
// query parameter, recording the current user as the operator.
func DeletePointLimitConfig(ctx *context.Context) {
	if err := limiter.DeleteLimitConfig(ctx.QueryInt64("id"), ctx.User); err != nil {
		ctx.JSON(http.StatusOK, response.ServerError(err.Error()))
		return
	}
	ctx.JSON(http.StatusOK, response.Success())
}

+ 170
- 0
routers/reward/point/point.go View File

@@ -0,0 +1,170 @@
package point

import (
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/base"
"code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/routers/response"
"code.gitea.io/gitea/services/reward"
"code.gitea.io/gitea/services/reward/point/account"
"code.gitea.io/gitea/services/task"
"errors"
"net/http"
)

// Template paths for the point pages rendered by GetPointPage and GetRulePage.
const tplPoint base.TplName = "reward/point"
const tplPointRule base.TplName = "reward/point/rule"

// AccountResponse is the JSON payload returned by GetPointAccount,
// summarizing a user's point account.
type AccountResponse struct {
	Balance       int64 // current spendable balance
	TotalEarned   int64 // cumulative points earned
	TotalConsumed int64 // cumulative points consumed
}

// GetPointAccount writes the signed-in user's point account summary
// (balance, total earned, total consumed) as JSON.
func GetPointAccount(ctx *context.Context) {
	acc, err := account.GetAccount(ctx.User.ID)
	if err != nil {
		ctx.JSON(http.StatusOK, response.ServerError(err.Error()))
		return
	}
	ctx.JSON(http.StatusOK, response.SuccessWithData(&AccountResponse{
		Balance:       acc.Balance,
		TotalEarned:   acc.TotalEarned,
		TotalConsumed: acc.TotalConsumed,
	}))
}

// GetPointRecordList lists the signed-in user's point operation records.
// Query parameters: "Operate" (operate type name), "Page" (page number),
// "sort" (only the default id-descending order is currently supported).
func GetPointRecordList(ctx *context.Context) {
	operateType := ctx.Query("Operate")
	page := ctx.QueryInt("Page")
	var orderBy models.RewardOperateOrderBy
	switch ctx.Query("sort") {
	default:
		orderBy = models.RewardOrderByIDDesc
	}
	t := models.GetRewardOperateTypeInstance(operateType)
	if t == "" {
		ctx.JSON(http.StatusOK, response.ServerError("param error"))
		return
	}

	r, err := reward.GetRewardRecordList(&models.RewardRecordListOpts{
		ListOptions: models.ListOptions{PageSize: 10, Page: page},
		UserId:      ctx.User.ID,
		OperateType: t,
		RewardType:  models.RewardTypePoint,
		OrderBy:     orderBy,
		IsAdmin:     false,
		UserName:    ctx.User.Name,
	})
	if err != nil {
		log.Error("GetPointRecordList error.%v", err)
		ctx.JSON(http.StatusOK, response.ServerError(err.Error()))
		return
	}

	// Removed the redundant trailing return (staticcheck S1023).
	ctx.JSON(http.StatusOK, response.SuccessWithData(r))
}

// OperatePointAccountBalance lets an administrator increase or decrease a
// user's point balance. OperateType and Remark are required; the reward
// type is always forced to point.
func OperatePointAccountBalance(ctx *context.Context, req models.AdminRewardOperateReq) {
	req.RewardType = models.RewardTypePoint
	if req.OperateType.Name() == "" || req.Remark == "" {
		// Use the standard error envelope, consistent with every other
		// handler in this package (the original wrote a bare string here).
		ctx.JSON(http.StatusOK, response.ServerError("param error"))
		return
	}
	err := reward.AdminBalanceOperate(req, ctx.User)
	if err != nil {
		log.Error("OperatePointAccountBalance error.%v", err)
		ctx.JSON(http.StatusOK, response.ServerError(err.Error()))
		return
	}
	ctx.JSON(http.StatusOK, response.Success())
}

// GetPointPage renders the user's reward-point page.
func GetPointPage(ctx *context.Context) {
	ctx.HTML(http.StatusOK, tplPoint)
}

// GetRulePage renders the reward-point rule page.
func GetRulePage(ctx *context.Context) {
	ctx.HTML(http.StatusOK, tplPointRule)
}

// GetRuleConfig returns the point rule configuration as JSON.
func GetRuleConfig(ctx *context.Context) {
	cfg, err := task.GetPointRule()
	if err != nil {
		log.Error("GetRuleConfig error.%v", err)
		ctx.JSON(http.StatusOK, response.ServerError(err.Error()))
		return
	}
	ctx.JSON(http.StatusOK, response.SuccessWithData(cfg))
}

// GetAdminRewardList lists reward records for administrators. An optional
// "userName" query parameter restricts the result to a single user; an
// unknown user name is reported as "user not exist".
func GetAdminRewardList(ctx *context.Context) {
	opts, err := buildAdminRewardRecordListOpts(ctx)
	if err != nil {
		log.Error("buildAdminRewardRecordListOpts error.%v", err)
		ctx.JSON(http.StatusOK, response.ServerError(err.Error()))
		return
	}

	if username := ctx.Query("userName"); username != "" {
		u, userErr := models.GetUserByName(username)
		if userErr != nil {
			log.Error("GetUserByName error.%v", userErr)
			msg := userErr.Error()
			if models.IsErrUserNotExist(userErr) {
				msg = "user not exist"
			}
			ctx.JSON(http.StatusOK, response.ServerError(msg))
			return
		}
		opts.UserId = u.ID
		opts.UserName = u.Name
	}

	records, err := reward.GetRewardRecordList(opts)
	if err != nil {
		log.Error("GetRewardRecordList error.%v", err)
		ctx.JSON(http.StatusOK, response.ServerError(err.Error()))
		return
	}

	ctx.JSON(http.StatusOK, response.SuccessWithData(records))
}

// buildAdminRewardRecordListOpts assembles the admin record-list options
// from the request's query parameters. It returns an error when the
// "operate" parameter does not name a known operate type.
func buildAdminRewardRecordListOpts(ctx *context.Context) (*models.RewardRecordListOpts, error) {
	operateType := models.GetRewardOperateTypeInstance(ctx.Query("operate"))
	if operateType == "" {
		return nil, errors.New("param error")
	}

	// Only the default sort order (id descending) is supported for now.
	orderBy := models.RewardOrderByIDDesc

	return &models.RewardRecordListOpts{
		ListOptions: models.ListOptions{PageSize: 10, Page: ctx.QueryInt("page")},
		OperateType: operateType,
		RewardType:  models.RewardTypePoint,
		OrderBy:     orderBy,
		SourceType:  ctx.Query("source"),
		TaskType:    ctx.Query("action"),
		SerialNo:    ctx.Query("serialNo"),
		IsAdmin:     true,
		Status:      ctx.Query("status"),
	}, nil
}

+ 44
- 10
routers/routes/routes.go View File

@@ -6,6 +6,9 @@ package routes

import (
"bytes"
"code.gitea.io/gitea/routers/reward/point"
"code.gitea.io/gitea/routers/task"
"code.gitea.io/gitea/services/reward"
"encoding/gob"
"net/http"
"path"
@@ -328,8 +331,11 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Get("/", routers.Home)
m.Get("/dashboard", routers.Dashboard)
go routers.SocketManager.Run()
go task.RunTask()
go reward.AcceptStatusChangeAction()
m.Get("/action/notification", routers.ActionNotification)
m.Get("/recommend/home", routers.RecommendHomeInfo)
m.Get("/dashboard/invitation", routers.GetMapInfo)
//m.Get("/recommend/org", routers.RecommendOrgFromPromote)
//m.Get("/recommend/repo", routers.RecommendRepoFromPromote)
m.Get("/recommend/userrank/:index", routers.GetUserRankFromPromote)
@@ -504,6 +510,8 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Get("/forgot_password", user.ForgotPasswd)
m.Post("/forgot_password", user.ForgotPasswdPost)
m.Post("/logout", user.SignOut)
m.Get("/invitation_code", reqSignIn, user.GetInvitaionCode)
m.Get("/invitation_tpl", reqSignIn, user.InviationTpl)
})
// ***** END: User *****

@@ -640,6 +648,20 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Group("/operation", func() {
m.Get("/config/recommend_org", operation.Organizations)
m.Post("/config/recommend_org", bindIgnErr(operation.OrgInfos{}), operation.UpdateRecommendOrganizations)

m.Group("/reward/point", func() {
m.Combo("/limiter/single-daily").Get(point.GetSingleDailyPointLimitConfig).Post(bindIgnErr(models.LimitConfigVO{}), point.SetSingleDailyPointLimitConfig)
m.Post("/limiter/delete", point.DeletePointLimitConfig)
m.Get("/account/search", point.SearchPointAccount)
m.Post("/account/operate", binding.Bind(models.AdminRewardOperateReq{}), point.OperatePointAccountBalance)
m.Get("/list", point.GetAdminRewardList)
})

m.Group("/task/config", func() {
m.Get("/list", task.GetTaskConfigList)
m.Post("/add/batch", bindIgnErr(models.BatchLimitConfigVO{}), task.BatchAddTaskConfig)
m.Post("/^:action(new|edit|del)$", bindIgnErr(models.TaskConfigWithLimit{}), task.OperateTaskConfig)
})
}, operationReq)
// ***** END: Operation *****

@@ -1113,7 +1135,7 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Get("/models", reqRepoCloudBrainReader, repo.CloudBrainShowModels)
m.Get("/download_model", cloudbrain.AdminOrJobCreaterRight, repo.CloudBrainDownloadModel)
})
m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, repo.CloudBrainNew)
m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, context.PointAccount(), repo.CloudBrainNew)
m.Post("/create", reqWechatBind, reqRepoCloudBrainWriter, bindIgnErr(auth.CreateCloudBrainForm{}), repo.CloudBrainCreate)

m.Group("/benchmark", func() {
@@ -1124,7 +1146,7 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Post("/del", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.BenchmarkDel)
m.Get("/rate", reqRepoCloudBrainReader, repo.GetRate)
})
m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, repo.CloudBrainBenchmarkNew)
m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, context.PointAccount(), repo.CloudBrainBenchmarkNew)
m.Post("/create", reqWechatBind, reqRepoCloudBrainWriter, bindIgnErr(auth.CreateCloudBrainForm{}), repo.CloudBrainBenchmarkCreate)
m.Get("/get_child_types", repo.GetChildTypes)
})
@@ -1137,8 +1159,10 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Get("/download_model", cloudbrain.AdminOrOwnerOrJobCreaterRightForTrain, repo.CloudBrainDownloadModel)
//m.Get("/get_log", cloudbrain.AdminOrJobCreaterRightForTrain, repo.GetLogFromModelDir)
//m.Post("/create_version", reqWechatBind, cloudbrain.AdminOrJobCreaterRightForTrain, bindIgnErr(auth.CreateModelArtsTrainJobForm{}), repo.TrainJobCreateVersion)
m.Get("/create_version", reqWechatBind, cloudbrain.AdminOrJobCreaterRightForTrain, repo.CloudBrainTrainJobVersionNew)
m.Post("/create_version", reqWechatBind, cloudbrain.AdminOrJobCreaterRightForTrain, bindIgnErr(auth.CreateCloudBrainForm{}), repo.CloudBrainTrainJobVersionCreate)
})
m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, repo.CloudBrainTrainJobNew)
m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, context.PointAccount(), repo.CloudBrainTrainJobNew)
m.Post("/create", reqWechatBind, reqRepoCloudBrainWriter, bindIgnErr(auth.CreateCloudBrainForm{}), repo.CloudBrainCreate)
})
m.Group("/inference-job", func() {
@@ -1148,7 +1172,7 @@ func RegisterRoutes(m *macaron.Macaron) {

m.Get("/downloadall", repo.DownloadInferenceResultFile)
})
m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, repo.InferenceCloudBrainJobNew)
m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, context.PointAccount(), repo.InferenceCloudBrainJobNew)
m.Post("/create", reqWechatBind, reqRepoCloudBrainWriter, bindIgnErr(auth.CreateCloudBrainInferencForm{}), repo.CloudBrainInferenceJobCreate)
})
}, context.RepoRef())
@@ -1159,13 +1183,15 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Post("/stop", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.GrampusStopJob)
m.Post("/del", cloudbrain.AdminOrOwnerOrJobCreaterRightForTrain, repo.GrampusTrainJobDel)
m.Get("/model_download", cloudbrain.AdminOrJobCreaterRightForTrain, repo.ModelDownload)
m.Get("/create_version", reqWechatBind, cloudbrain.AdminOrJobCreaterRightForTrain, repo.GrampusTrainJobVersionNew)
m.Post("/create_version", reqWechatBind, cloudbrain.AdminOrJobCreaterRightForTrain, bindIgnErr(auth.CreateGrampusTrainJobForm{}), repo.GrampusTrainJobVersionCreate)
})
m.Group("/gpu", func() {
m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, repo.GrampusTrainJobGPUNew)
m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, context.PointAccount(), repo.GrampusTrainJobGPUNew)
m.Post("/create", reqWechatBind, reqRepoCloudBrainWriter, bindIgnErr(auth.CreateGrampusTrainJobForm{}), repo.GrampusTrainJobGpuCreate)
})
m.Group("/npu", func() {
m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, repo.GrampusTrainJobNPUNew)
m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, context.PointAccount(), repo.GrampusTrainJobNPUNew)
m.Post("/create", reqWechatBind, reqRepoCloudBrainWriter, bindIgnErr(auth.CreateGrampusTrainJobForm{}), repo.GrampusTrainJobNpuCreate)
})
})
@@ -1222,7 +1248,7 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Post("/stop", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.NotebookStop)
m.Post("/del", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.NotebookDel)
})
m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, repo.NotebookNew)
m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, context.PointAccount(), repo.NotebookNew)
m.Post("/create", reqWechatBind, reqRepoCloudBrainWriter, bindIgnErr(auth.CreateModelArtsNotebookForm{}), repo.Notebook2Create)
})

@@ -1234,10 +1260,10 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Post("/del", cloudbrain.AdminOrOwnerOrJobCreaterRightForTrain, repo.TrainJobDel)
m.Get("/model_download", cloudbrain.AdminOrJobCreaterRightForTrain, repo.ModelDownload)
m.Get("/download_log_file", cloudbrain.AdminOrJobCreaterRightForTrain, repo.TrainJobDownloadLogFile)
m.Get("/create_version", reqWechatBind, cloudbrain.AdminOrJobCreaterRightForTrain, repo.TrainJobNewVersion)
m.Get("/create_version", reqWechatBind, cloudbrain.AdminOrJobCreaterRightForTrain, context.PointAccount(), repo.TrainJobNewVersion)
m.Post("/create_version", reqWechatBind, cloudbrain.AdminOrJobCreaterRightForTrain, bindIgnErr(auth.CreateModelArtsTrainJobForm{}), repo.TrainJobCreateVersion)
})
m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, repo.TrainJobNew)
m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, context.PointAccount(), repo.TrainJobNew)
m.Post("/create", reqWechatBind, reqRepoCloudBrainWriter, bindIgnErr(auth.CreateModelArtsTrainJobForm{}), repo.TrainJobCreate)

m.Get("/para-config-list", reqRepoCloudBrainReader, repo.TrainJobGetConfigList)
@@ -1250,7 +1276,7 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Get("/result_download", cloudbrain.AdminOrJobCreaterRightForTrain, repo.ResultDownload)
m.Get("/downloadall", repo.DownloadMultiResultFile)
})
m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, repo.InferenceJobNew)
m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, context.PointAccount(), repo.InferenceJobNew)
m.Post("/create", reqWechatBind, reqRepoCloudBrainWriter, bindIgnErr(auth.CreateModelArtsInferenceJobForm{}), repo.InferenceJobCreate)
})
}, context.RepoRef())
@@ -1410,6 +1436,14 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Post("/purge", user.NotificationPurgePost)
}, reqSignIn)

m.Group("/reward/point", func() {
m.Get("", point.GetPointPage)
m.Get("/rule", point.GetRulePage)
m.Get("/rule/config", point.GetRuleConfig)
m.Get("/account", point.GetPointAccount)
m.Get("/record/list", point.GetPointRecordList)
}, reqSignIn)

if setting.API.EnableSwagger {
m.Get("/swagger.v1.json", templates.JSONRenderer(), routers.SwaggerV1Json)
}


+ 68
- 0
routers/task/config.go View File

@@ -0,0 +1,68 @@
package task

import (
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/routers/response"
"code.gitea.io/gitea/services/task"
"errors"
"net/http"
)

// GetTaskConfigList lists task configs together with their limiter
// settings, filtered by the "Status" and "Action" query parameters and
// paged by "Page" (20 per page).
func GetTaskConfigList(ctx *context.Context) {
	opts := models.GetTaskConfigOpts{
		ListOptions: models.ListOptions{PageSize: 20, Page: ctx.QueryInt("Page")},
		Status:      ctx.QueryInt("Status"),
		TaskType:    ctx.Query("Action"),
	}
	list, err := task.GetTaskConfigWithLimitList(opts)
	if err != nil {
		log.Error("GetTaskConfigList error.%v", err)
		ctx.JSON(http.StatusOK, response.ServerError(err.Error()))
		return
	}
	ctx.JSON(http.StatusOK, response.SuccessWithData(list))
}

// OperateTaskConfig dispatches the ":action" URL parameter (new|edit|del)
// to the corresponding task-config operation, run as the current user.
func OperateTaskConfig(ctx *context.Context, config models.TaskConfigWithLimit) {
	var err error
	switch ctx.Params(":action") {
	case "new":
		err = task.AddTaskConfig(config, ctx.User)
	case "edit":
		err = task.EditTaskConfig(config, ctx.User)
	case "del":
		err = task.DelTaskConfig(config.ID, ctx.User)
	default:
		err = errors.New("action type error")
	}
	if err != nil {
		log.Error("OperateTaskConfig error ,%v", err)
		ctx.JSON(http.StatusOK, response.ServerError(err.Error()))
		return
	}
	ctx.JSON(http.StatusOK, response.Success())
}
// BatchAddTaskConfig attempts to add every config in the submitted list,
// counting successes and failures independently, and reports both counts
// as JSON.
func BatchAddTaskConfig(ctx *context.Context, list models.BatchLimitConfigVO) {
	var succeeded, failed int
	for _, cfg := range list.ConfigList {
		if err := task.AddTaskConfig(cfg, ctx.User); err != nil {
			failed++
		} else {
			succeeded++
		}
	}
	result := map[string]int{
		"successCount": succeeded,
		"failCount":    failed,
	}
	log.Debug("BatchAddTaskConfig success.result=%v", result)
	ctx.JSON(http.StatusOK, response.SuccessWithData(result))
}

+ 15
- 0
routers/task/task.go View File

@@ -0,0 +1,15 @@
package task

import (
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/services/task"
)

// RunTask consumes actions published on models.ActionChan4Task and
// dispatches each one to the task-accomplish handler. It blocks forever
// and is intended to run in its own goroutine.
func RunTask() {
	// A single-case select adds nothing over a plain channel receive
	// (staticcheck S1000), so receive directly.
	for {
		action := <-models.ActionChan4Task
		task.Accomplish(action)
	}
}

+ 107
- 0
routers/user/Invitation.go View File

@@ -0,0 +1,107 @@
package user

import (
"errors"
"strconv"
"strings"

"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/base"
"code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/services/repository"
)

const (
tplInvitation base.TplName = "user/settings/invite"
)

// GetInvitaionCode renders the invitation data for the current request as
// JSON. It first pulls page copy ("key=value" lines) from the promote
// service; for a signed-in user it additionally includes their invitation
// code and a paged list of the users they have invited.
func GetInvitaionCode(ctx *context.Context) {
	page := ctx.QueryInt("page")
	if page <= 0 {
		page = 1
	}
	pageSize := ctx.QueryInt("pageSize")
	if pageSize <= 0 {
		pageSize = setting.UI.IssuePagingNum
	}

	// Fetch "key=value" lines from the promote host and expose each pair
	// as a top-level JSON field. Fetch errors are deliberately ignored:
	// the endpoint still responds without the promotional copy.
	url := setting.RecommentRepoAddr + "invitaion_page"
	result, err := repository.RecommendFromPromote(url)
	resultJsonMap := make(map[string]interface{}, 0)
	if err == nil {
		for _, strLine := range result {
			tmpIndex := strings.Index(strLine, "=")
			if tmpIndex != -1 {
				key := strLine[0:tmpIndex]
				value := strLine[tmpIndex+1:]
				resultJsonMap[key] = value
			}
		}
	}

	if ctx.IsSigned {
		resultJsonMap["invitation_code"] = getInvitaionCode(ctx)
		re, count := models.QueryInvitaionBySrcUserId(ctx.User.ID, (page-1)*pageSize, pageSize)
		// Decorate each invitation record with the invited user's avatar
		// URL, activation state and name; records whose user can no
		// longer be loaded are left undecorated.
		for _, record := range re {
			tmpUser, err := models.GetUserByID(record.UserID)
			if err == nil {
				record.Avatar = strings.TrimRight(setting.AppSubURL, "/") + "/user/avatar/" + tmpUser.Name + "/" + strconv.Itoa(-1)
				record.IsActive = tmpUser.IsActive
				record.Name = tmpUser.Name
			}
		}
		resultJsonMap["invitation_users"] = re
		resultJsonMap["invitation_users_count"] = count
	}

	ctx.JSON(200, resultJsonMap)
}

// InviationTpl renders the invitation page under user settings.
func InviationTpl(ctx *context.Context) {
	ctx.HTML(200, tplInvitation)
}

// RegisteUserByInvitaionCode records that the newly registered user
// (newUserId / newPhoneNumber) was invited by the owner of
// invitationcode. It returns an error when the code does not resolve to
// an existing user, or when the phone number was already invited before.
func RegisteUserByInvitaionCode(invitationcode string, newUserId int64, newPhoneNumber string) error {
	user := parseInvitaionCode(invitationcode)
	if user == nil {
		return errors.New("The invitated user not existed.")
	}

	if newPhoneNumber != "" {
		// len() of a nil slice is zero, so one check replaces the
		// original's nested nil + length tests.
		if re := models.QueryInvitaionByPhone(newPhoneNumber); len(re) > 0 {
			log.Info("The phone has been invitated. so ingore it.")
			return errors.New("The phone has been invitated.")
		}
	} else {
		log.Info("the phone number is null. user name=" + user.Name)
	}

	invitation := &models.Invitation{
		SrcUserID: user.ID,
		UserID:    newUserId,
		Phone:     newPhoneNumber,
	}

	err := models.InsertInvitaion(invitation)
	if err != nil {
		log.Info("insert error," + err.Error())
	}
	return err
}

// getInvitaionCode returns the current user's invitation code, which is
// simply their user name (parseInvitaionCode performs the inverse).
func getInvitaionCode(ctx *context.Context) string {
	return ctx.User.Name
}

// parseInvitaionCode resolves an invitation code back to the inviting
// user. The code is the inviter's user name; nil is returned when no
// such user exists.
func parseInvitaionCode(invitationcode string) *models.User {
	if user, err := models.GetUserByName(invitationcode); err == nil {
		return user
	}
	return nil
}

+ 28
- 24
routers/user/auth.go View File

@@ -8,11 +8,12 @@ package user
import (
"errors"
"fmt"
"github.com/gomodule/redigo/redis"
"net/http"
"strconv"
"strings"

"github.com/gomodule/redigo/redis"

"code.gitea.io/gitea/modules/slideimage"

phoneService "code.gitea.io/gitea/services/phone"
@@ -352,18 +353,17 @@ func SignInPostCommon(ctx *context.Context, form auth.SignInForm) {
ctx.Redirect(setting.AppSubURL + "/user/two_factor")
}


func SignInCloudBrainPost(ctx *context.Context, form auth.SignInForm) {
ctx.Data["PageIsCloudBrainLogin"] = true
ctx.Data["SignInLink"] = setting.AppSubURL + "/user/login/cloud_brain"
SignInPostCommon(ctx,form)
SignInPostCommon(ctx, form)
}

// SignInPost response for sign in request
func SignInPost(ctx *context.Context, form auth.SignInForm) {
ctx.Data["PageIsLogin"] = true
ctx.Data["SignInLink"] = setting.AppSubURL + "/user/login"
SignInPostCommon(ctx,form)
SignInPostCommon(ctx, form)
}

// TwoFactor shows the user a two-factor authentication page.
@@ -1264,9 +1264,9 @@ func SignUp(ctx *context.Context) {
// SignUpPost response for sign up information submission
func SignUpPost(ctx *context.Context, cpt *captcha.Captcha, form auth.RegisterForm) {
ctx.Data["Title"] = ctx.Tr("sign_up")
invitationCode := ctx.Query("invitation_code")
ctx.Data["SignUpLink"] = setting.AppSubURL + "/user/sign_up"
ctx.Data["invitationCode"] = invitationCode
ctx.Data["EnableCaptcha"] = setting.Service.EnableCaptcha
ctx.Data["RecaptchaURL"] = setting.Service.RecaptchaURL
ctx.Data["CaptchaType"] = setting.Service.CaptchaType
@@ -1366,6 +1366,11 @@ func SignUpPost(ctx *context.Context, cpt *captcha.Captcha, form auth.RegisterFo
}
log.Trace("Account created: %s", u.Name, ctx.Data["MsgID"])

log.Info("enter here, and form.InvitaionCode =" + invitationCode)
if invitationCode != "" {
RegisteUserByInvitaionCode(invitationCode, u.ID, u.PhoneNumber)
}

err := models.AddEmailAddress(&models.EmailAddress{
UID: u.ID,
Email: form.Email,
@@ -1919,7 +1924,7 @@ func SendVerifyCode(ctx *context.Context, slideImage *slideimage.SlideImage, for
return
}

if form.Mode==0 { //注册
if form.Mode == 0 { //注册

if has {
ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("phone.already_register")))
@@ -1935,32 +1940,31 @@ func SendVerifyCode(ctx *context.Context, slideImage *slideimage.SlideImage, for

} else {
//修改手机号 mode=2 绑定手机
u, err := models.GetUserByPhoneNumber(phoneNumber)
if err != nil && !models.IsErrUserNotExist(err) {
log.Warn("sql err", err)
ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("phone.query_err")))
return
}

if u != nil {
u, err := models.GetUserByPhoneNumber(phoneNumber)
if err != nil && !models.IsErrUserNotExist(err) {
log.Warn("sql err", err)
ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("phone.query_err")))
return
}

if u.ID == ctx.User.ID { //没有修改手机号
ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("phone.not_modify")))
return
} else { //修改的手机已经被别的用户注册
ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("phone.already_register")))
return
}
if u != nil {

if u.ID == ctx.User.ID { //没有修改手机号
ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("phone.not_modify")))
return
} else { //修改的手机已经被别的用户注册
ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("phone.already_register")))
return
}
}

}
}

redisConn := labelmsg.Get()
defer redisConn.Close()

sendTimes, err := phoneService.GetPhoneNumberSendTimes(redisConn, phoneNumber)
if err != nil && err!=redis.ErrNil {
if err != nil && err != redis.ErrNil {
log.Warn("redis err", err)
ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("phone.query_err")))
return


+ 2
- 0
routers/user/setting/profile.go View File

@@ -6,6 +6,7 @@
package setting

import (
"code.gitea.io/gitea/modules/notification"
"errors"
"fmt"
"io/ioutil"
@@ -179,6 +180,7 @@ func AvatarPost(ctx *context.Context, form auth.AvatarForm) {
if err := UpdateAvatarSetting(ctx, form, ctx.User); err != nil {
ctx.Flash.Error(err.Error())
} else {
notification.NotifyChangeUserAvatar(ctx.User, form)
ctx.Flash.Success(ctx.Tr("settings.update_avatar_success"))
}



+ 1
- 1
services/phone/phone.go View File

@@ -46,7 +46,7 @@ func SendVerifyCode(conn redis.Conn, phoneNumber string) error {
if err != nil {
return err
}
err = redis_client.Expire(conn, timesKey, getRemainSecondOfDay(time.Now()))
err = redis_client.EXPIRE(conn, timesKey, getRemainSecondOfDay(time.Now()))
if err != nil {
return err
}


+ 22
- 0
services/repository/repository.go View File

@@ -148,6 +148,28 @@ func GetRecommendRepoFromPromote(filename string) ([]map[string]interface{}, err
return resultRepo, nil
}

// RecommendContentFromPromote fetches the body of the given promote URL
// and returns it as a single string. Transport errors are returned; a
// non-200 status yields ("", nil), matching the original's observable
// behavior (see the note below) but without panicking.
func RecommendContentFromPromote(url string) (string, error) {
	defer func() {
		if err := recover(); err != nil {
			log.Info("not error.", err)
			return
		}
	}()
	resp, err := http.Get(url)
	if err != nil {
		log.Info("Get organizations url error=" + err.Error())
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		// The original combined this with the err check and then called
		// err.Error() on a nil error, panicking into the recover above
		// (which returned "", nil) and leaking the response body. Keep
		// the ("", nil) result, drop the panic, and close the body.
		log.Info("Get organizations url error. status=" + resp.Status)
		return "", nil
	}
	bytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Info("Get organizations url error=" + err.Error())
		return "", err
	}
	return string(bytes), nil
}

func RecommendFromPromote(url string) ([]string, error) {
defer func() {
if err := recover(); err != nil {


+ 50
- 0
services/reward/admin_operate.go View File

@@ -0,0 +1,50 @@
package reward

import (
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/util"
)

// AdminBalanceOperate applies an administrator-initiated balance change
// to a user's reward account. It first writes an admin log entry in
// "processing" state, then runs the reward operation, and finally marks
// the log entry success or failed according to the outcome.
func AdminBalanceOperate(req models.AdminRewardOperateReq, doer *models.User) error {
	// One UUID ties together the admin log entry, the reward source id
	// and the idempotency request id.
	logId := util.UUID()
	_, err := models.InsertRewardAdminLog(&models.RewardAdminLog{
		LogId:        logId,
		Amount:       req.Amount,
		RewardType:   req.RewardType.Name(),
		TargetUserId: req.TargetUserId,
		CreatorId:    doer.ID,
		CreatorName:  doer.Name,
		Remark:       req.Remark,
		Status:       models.RewardAdminLogProcessing,
	})
	if err != nil {
		log.Error("AdminBalanceOperate InsertRewardAdminLog error.%v", err)
		return err
	}

	// Perform the actual reward operation. Admin operations may drive
	// the balance negative (PermittedNegative) and use the JustReject
	// policy on limiter refusal.
	err = Operate(&models.RewardOperateContext{
		SourceType: models.SourceTypeAdminOperate,
		SourceId:   logId,
		Title:      "管理员操作",
		Reward: models.Reward{
			Amount: req.Amount,
			Type:   req.RewardType,
		},
		TargetUserId:      req.TargetUserId,
		RequestId:         logId,
		OperateType:       req.OperateType,
		Remark:            req.Remark,
		RejectPolicy:      models.JustReject,
		PermittedNegative: true,
	})

	if err != nil {
		log.Error("AdminBalanceOperate operate error.%v", err)
		models.UpdateRewardAdminLogStatus(logId, models.RewardAdminLogProcessing, models.RewardAdminLogFailed)
		return err
	}
	models.UpdateRewardAdminLogStatus(logId, models.RewardAdminLogProcessing, models.RewardAdminLogSuccess)
	return nil
}

+ 145
- 0
services/reward/cloudbrain_deduct.go View File

@@ -0,0 +1,145 @@
package reward

import (
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
"fmt"
"time"
)

var (
	// NOTE(review): these spec caches are not referenced within this
	// file; presumably populated and read elsewhere — confirm before
	// removing.
	ResourceSpecs      *models.ResourceSpecs
	TrainResourceSpecs *models.ResourceSpecs
)

// RUN_CLOUDBRAIN_TASK_TITTLE is the reward-record title attached to
// cloudbrain point-deduction tasks.
const RUN_CLOUDBRAIN_TASK_TITTLE = "运行云脑任务"

// AcceptStatusChangeAction consumes cloudbrain status-change events from
// models.StatusChangeChan and triggers point deduction for each changed
// task. It blocks forever and is intended to run in its own goroutine.
func AcceptStatusChangeAction() {
	// A single-case select is equivalent to a plain channel receive
	// (staticcheck S1000).
	for {
		task := <-models.StatusChangeChan
		DeductPoint4Cloudbrain(*task, time.Now())
	}
}

// StartAndGetCloudBrainPointDeductTask returns the periodic point-
// deduction task for the given cloudbrain task, creating and starting it
// when necessary. It returns (nil, nil) when no deduction applies: the
// pay switch is off, or the task's unit price is zero.
func StartAndGetCloudBrainPointDeductTask(task models.Cloudbrain) (*models.RewardPeriodicTask, error) {
	sourceId := getCloudBrainPointTaskSourceId(task)
	r, err := GetPeriodicTask(models.SourceTypeRunCloudbrainTask, sourceId, sourceId, models.OperateTypeDecrease)
	if err != nil {
		return nil, err
	}

	// Reuse the periodic task if one was already started for this
	// cloudbrain task.
	if r != nil {
		log.Debug("PeriodicTask is already exist.cloudbrain.ID = %d", task.ID)
		return r, nil
	}

	if !setting.CloudBrainPaySwitch {
		log.Debug("CloudBrainPaySwitch is off")
		return nil, nil
	}

	unitPrice, err := models.GetCloudbrainTaskUnitPrice(task)
	if err != nil {
		return nil, err
	}
	// A zero unit price means the task is free — nothing to deduct.
	if unitPrice == 0 {
		log.Debug("Finish startAndGetCloudBrainPointDeductTask, UnitPrice = 0 task.ID=%d", task.ID)
		return nil, nil
	}

	return StartPeriodicTask(&models.StartPeriodicTaskOpts{
		SourceType:   models.SourceTypeRunCloudbrainTask,
		SourceId:     getCloudBrainPointTaskSourceId(task),
		TargetUserId: task.UserID,
		RequestId:    getCloudBrainPointTaskSourceId(task),
		OperateType:  models.OperateTypeDecrease,
		Delay:        setting.CloudBrainPayDelay,
		Interval:     setting.CloudBrainPayInterval,
		UnitAmount:   unitPrice,
		RewardType:   models.RewardTypePoint,
		StartTime:    time.Unix(int64(task.StartTime), 0),
		Title:        RUN_CLOUDBRAIN_TASK_TITTLE,
	})
}

// StopCloudBrainPointDeductTask stops the periodic point-deduction task
// associated with the given cloudbrain task, if one exists.
func StopCloudBrainPointDeductTask(task models.Cloudbrain) {
	StopPeriodicTask(models.SourceTypeRunCloudbrainTask, getCloudBrainPointTaskSourceId(task), models.OperateTypeDecrease)
}

// getCloudBrainPointTaskSourceId derives the periodic-task source id for
// a cloudbrain task: its numeric ID rendered as a string.
func getCloudBrainPointTaskSourceId(task models.Cloudbrain) string {
	return fmt.Sprint(task.ID)
}

// firstTimeFlag makes the first run of StartCloudbrainPointDeductTask use
// the larger catch-up window (DeductTaskRangeForFirst).
var firstTimeFlag = true

// StartCloudbrainPointDeductTask scans cloudbrain tasks updated within
// the configured time range and deducts points for each. The first run
// after startup uses a wider range to catch up on tasks missed while the
// application was down. Panics are recovered and logged so a scheduler
// can call this safely.
func StartCloudbrainPointDeductTask() {
	defer func() {
		if err := recover(); err != nil {
			combinedErr := fmt.Errorf("%s\n%s", err, log.Stack(2))
			log.Error("PANIC:%v", combinedErr)
		}
	}()
	log.Debug("try to run CloudbrainPointDeductTask")
	end := time.Now()
	start := end.Add(-1 * setting.DeductTaskRange)
	if firstTimeFlag {
		//When it is executed for the first time, it needs to process the tasks of the last 3 hours.
		//This is done to prevent the application from hanging for a long time
		start = end.Add(-1 * setting.DeductTaskRangeForFirst)
		firstTimeFlag = false
	}
	taskList, err := models.GetStartedCloudbrainTaskByUpdatedUnix(start, end)
	if err != nil {
		log.Error("GetStartedCloudbrainTaskByUpdatedUnix error. %v", err)
		return
	}
	if taskList == nil || len(taskList) == 0 {
		log.Debug("No cloudbrain task need handled")
		return
	}
	for _, t := range taskList {
		DeductPoint4Cloudbrain(t, end)
	}
	log.Debug("CloudbrainPointDeductTask completed")
}

// DeductPoint4Cloudbrain ensures the periodic point-deduction task for
// cloudbrain task t exists, then advances it up to "now" — or up to the
// task's end time, also closing the periodic task, once t has ended.
// Panics are recovered and logged so one bad task cannot kill the
// caller's loop.
func DeductPoint4Cloudbrain(t models.Cloudbrain, now time.Time) error {
	defer func() {
		if err := recover(); err != nil {
			combinedErr := fmt.Errorf("%s\n%s", err, log.Stack(2))
			log.Error("PANIC:%v", combinedErr)
		}
	}()
	log.Debug("start to deduct point for cloudbrain[%d]", t.ID)
	if t.StartTime == 0 {
		log.Debug("cloudbrain[%d] task not start", t.ID)
		return nil
	}

	task, err := StartAndGetCloudBrainPointDeductTask(t)
	if err != nil {
		log.Error("run cloudbrain point deduct task error,err=%v", err)
		return err
	}
	if task == nil {
		// The %d verb previously had no argument; pass the task id.
		log.Debug("cloudbrain[%d] deduct task is nil", t.ID)
		return nil
	}
	if task.Status == models.PeriodicTaskStatusFinished {
		log.Info("Periodic task is finished")
		return nil
	}

	if t.EndTime > 0 {
		// The cloudbrain task has ended: settle up to its end time and
		// stop the periodic deduction task.
		endTime := time.Unix(int64(t.EndTime), 0)
		RunRewardTask(*task, endTime)
		models.StopPeriodicTask(task.ID, task.OperateSerialNo, endTime)
	} else {
		RunRewardTask(*task, now)
	}
	log.Debug("finished deduct point for cloudbrain[%d]", t.ID)
	return nil
}

+ 100
- 0
services/reward/limiter/config.go View File

@@ -0,0 +1,100 @@
package limiter

import (
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/redis/redis_client"
"code.gitea.io/gitea/modules/redis/redis_key"
)

// GetSingleDailyPointLimitConfig returns the daily reward-point limit
// configured for a single user, or nil when no such config exists.
func GetSingleDailyPointLimitConfig() (*models.LimitConfigVO, error) {
	configs, err := GetLimitConfigList(models.LimitConfigQueryOpts{
		RefreshRate: models.PeriodDaily,
		Scope:       models.LimitScopeSingleUser,
		LimitCode:   models.SourceTypeAccomplishTask.Name(),
		LimitType:   models.LimitTypeRewardPoint,
	})
	if err != nil {
		return nil, err
	}
	// len() is zero for a nil slice, so one check covers both cases.
	if len(configs) == 0 {
		return nil, nil
	}
	return configs[0], nil
}

// SetSingleDailyPointLimitConfig stores limitNum as the per-user daily
// reward-point limit, attributed to doer.
func SetSingleDailyPointLimitConfig(limitNum int64, doer *models.User) error {
	return AddLimitConfig(&models.LimitConfigVO{
		RefreshRate: models.PeriodDaily,
		Scope:       models.LimitScopeSingleUser.Name(),
		LimitCode:   models.SourceTypeAccomplishTask.Name(),
		LimitType:   models.LimitTypeRewardPoint.Name(),
		LimitNum:    limitNum,
	}, doer)
}

// GetLimitConfigList loads all limiters of opts.LimitType and filters
// them in memory by the remaining fields of opts; an empty filter field
// matches everything.
func GetLimitConfigList(opts models.LimitConfigQueryOpts) ([]*models.LimitConfigVO, error) {
	limiters, err := GetLimitersByLimitType(opts.LimitType)
	if err != nil {
		log.Error("GetLimitConfigList error when getting limiters by limit type.err=%v", err)
		return nil, err
	}
	result := make([]*models.LimitConfigVO, 0)
	for _, l := range limiters {
		// Every non-empty filter field must match (De Morgan form of the
		// original continue-chain).
		if (opts.LimitCode == "" || opts.LimitCode == l.LimitCode) &&
			(opts.Scope == "" || opts.Scope.Name() == l.Scope) &&
			(opts.RefreshRate == "" || opts.RefreshRate == l.RefreshRate) &&
			(opts.LimitType == "" || opts.LimitType.Name() == l.LimitType) {
			result = append(result, l.ToLimitConfigVO())
		}
	}
	return result, nil
}
// GetLimitConfigById is a thin pass-through to the models layer that
// fetches a single limit config row by its primary key.
func GetLimitConfigById(id int64) (*models.LimitConfig, error) {
	return models.GetLimitConfigById(id)
}

// AddLimitConfig persists a new limit config on behalf of doer and evicts
// the cached limiter list for its limit type.
func AddLimitConfig(config *models.LimitConfigVO, doer *models.User) error {
	row := models.LimitConfig{
		Title:       config.Title,
		RefreshRate: config.RefreshRate,
		Scope:       config.Scope,
		LimitNum:    config.LimitNum,
		LimitCode:   config.LimitCode,
		LimitType:   config.LimitType,
		CreatorId:   doer.ID,
		CreatorName: doer.Name,
	}
	if err := models.AddLimitConfig(&row); err != nil {
		log.Error("add limit config error,config:%v err:%v", config, err)
		return err
	}
	// Drop the cached list so the next read picks up the new config.
	redis_client.Del(redis_key.LimitConfig(config.LimitType))
	return nil
}

// DeleteLimitConfig removes the limit config identified by id on behalf
// of doer and evicts the cached limiter list for its limit type.
func DeleteLimitConfig(id int64, doer *models.User) error {
	config, err := GetLimitConfigById(id)
	if err != nil {
		log.Error("GetLimitConfigById err,e=%v", err)
		return err
	}
	err = models.DeleteLimitConfig(*config, doer.ID, doer.Name)

	if err != nil {
		// Fix: log message was copy-pasted from the add path.
		log.Error("delete limit config error,config:%v err:%v", config, err)
		return err
	}
	redis_client.Del(redis_key.LimitConfig(config.LimitType))
	return nil
}

+ 258
- 0
services/reward/limiter/limiter.go View File

@@ -0,0 +1,258 @@
package limiter

import (
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/redis/redis_client"
"code.gitea.io/gitea/modules/redis/redis_key"
"code.gitea.io/gitea/services/task/period"
"encoding/json"
"errors"
"fmt"
"time"
)

// limiterRunner applies every limit config matching (limitCode, limitType)
// to one operation of `amount` for user `userId`, tracking the per-limiter
// outcome so partially-applied counters can be rolled back or reconciled.
type limiterRunner struct {
	limiters      []models.LimitConfig       // limit configs to enforce, applied in order
	index         int                        // index of the limiter currently being applied
	userId        int64                      // user the operation belongs to
	amount        int64                      // amount requested for this operation
	limitCode     string                     // source code the limiters must match (empty matches all)
	limitType     models.LimitType           // type of limit being enforced
	rejectPolicy  models.LimiterRejectPolicy // behavior when a limit would be exceeded
	resultMap     map[int]limitResult        // per-limiter outcome, keyed by limiter index
	minRealAmount int64                      // smallest amount actually granted across all limiters
}

// limitResult records the outcome of one limiter check: the amount that
// was requested (planAmount), the amount actually granted (realAmount),
// and whether part of the request was lost to the limit (isLoss).
type limitResult struct {
	isLoss     bool
	planAmount int64
	realAmount int64
}

// newLimitResult assembles a limitResult from its three components.
func newLimitResult(isLoss bool, planAmount int64, realAmount int64) limitResult {
	return limitResult{isLoss: isLoss, planAmount: planAmount, realAmount: realAmount}
}

// newLimiterRunner prepares a runner for one limited operation.
func newLimiterRunner(limitCode string, limitType models.LimitType, userId, amount int64, policy models.LimiterRejectPolicy) *limiterRunner {
	runner := &limiterRunner{
		userId:       userId,
		amount:       amount,
		limitCode:    limitCode,
		limitType:    limitType,
		rejectPolicy: policy,
	}
	runner.index = 0
	runner.resultMap = make(map[int]limitResult)
	return runner
}

//Run run all limiters
//return real used amount(when choose the FillUp reject policy, amount may only be partially used)
func (l *limiterRunner) Run() error {
	if err := l.LoadLimiters(); err != nil {
		return err
	}
	l.minRealAmount = l.amount
	for l.index < len(l.limiters) {
		err := l.limit(l.limiters[l.index])
		if err != nil {
			log.Info("limiter check failed,%v", err)
			// A limiter rejected the operation: undo the counter
			// increments already made by earlier limiters.
			l.Rollback()
			return err
		}
		result := l.resultMap[l.index]
		if result.isLoss {
			//track the minimum amount granted by any limiter; it is the
			//amount the whole operation can actually use
			if l.minRealAmount > result.realAmount {
				l.minRealAmount = result.realAmount
			}
		}
		l.index += 1
	}

	//post process: shrink each limiter's counter down to minRealAmount
	l.PostProcess()
	return nil
}

// Rollback undoes the counter increments made by limiters[0..index-1],
// walking them in reverse order of application.
func (l *limiterRunner) Rollback() error {
	for i := l.index - 1; i >= 0; i-- {
		l.rollback(l.limiters[i], l.resultMap[i])
	}
	return nil
}

// rollback decrements one limiter's period counter by the amount it had
// granted for this operation.
func (l *limiterRunner) rollback(r models.LimitConfig, result limitResult) error {
	pr, err := period.GetPeriod(r.RefreshRate)
	if err != nil {
		return err
	}
	key := redis_key.LimitCount(l.userId, r.LimitCode, r.LimitType, r.Scope, pr)
	redis_client.IncrBy(key, -result.realAmount)
	return nil
}

// PostProcess reconciles each limiter's counter when the operation ended
// up using less than it initially reserved (realAmount > minRealAmount).
func (l *limiterRunner) PostProcess() error {
	for i := l.index - 1; i >= 0; i-- {
		l.postProcess(l.limiters[i], l.resultMap[i])
	}
	return nil
}

// postProcess releases the surplus one limiter reserved beyond the final
// granted amount (minRealAmount) back to its period counter.
func (l *limiterRunner) postProcess(r models.LimitConfig, result limitResult) error {
	if result.realAmount == l.minRealAmount {
		// Already holds the final amount; nothing to release.
		return nil
	}
	pr, err := period.GetPeriod(r.RefreshRate)
	if err != nil {
		return err
	}
	surplus := result.realAmount - l.minRealAmount
	key := redis_key.LimitCount(l.userId, r.LimitCode, r.LimitType, r.Scope, pr)
	redis_client.IncrBy(key, -surplus)
	return nil
}

// limit applies one limit config: it reserves l.amount in the limiter's
// period counter in redis and, when the counter then exceeds r.LimitNum,
// resolves the overflow according to the runner's reject policy.
func (l *limiterRunner) limit(r models.LimitConfig) error {
	p, err := period.GetPeriod(r.RefreshRate)
	if err != nil {
		return err
	}
	redisKey := redis_key.LimitCount(l.userId, r.LimitCode, r.LimitType, r.Scope, p)
	usedNum, err := redis_client.IncrBy(redisKey, l.amount)
	if err != nil {
		return err
	}
	//if usedNum equals amount,it is the first operation in period or redis cache deleted
	//count in database to distinguish the two cases
	if usedNum == l.amount {
		n, err := l.countInPeriod(r, p)
		if err != nil {
			return err
		}
		if n > 0 {
			//means redis cache deleted,incr the cache with real value
			usedNum, err = redis_client.IncrBy(redisKey, n)
			if err != nil {
				// Fix: this error was previously ignored, leaving an
				// undercounted value in the cache unnoticed.
				return err
			}
		}
		if p != nil {
			redis_client.Expire(redisKey, p.LeftTime)
		} else {
			//add default expire time if no period set
			redis_client.Expire(redisKey, 24*time.Hour)
		}
	}
	if usedNum > r.LimitNum {
		if usedNum-r.LimitNum >= l.amount {
			// The limit was already fully consumed before this request.
			redis_client.IncrBy(redisKey, -1*l.amount)
			return fmt.Errorf("over limit,configId=%d", r.ID)
		}
		switch l.rejectPolicy {
		case models.FillUp:
			// Grant only the part that still fits under the limit.
			exceed := usedNum - r.LimitNum
			realAmount := l.amount - exceed
			redis_client.IncrBy(redisKey, -1*exceed)
			l.resultMap[l.index] = newLimitResult(true, l.amount, realAmount)
			return nil
		case models.JustReject:
			redis_client.IncrBy(redisKey, -1*l.amount)
			return fmt.Errorf("over limit,configId=%d", r.ID)
		case models.PermittedOnce:
			// Allow this request in full even though it crosses the limit.
			l.resultMap[l.index] = newLimitResult(false, l.amount, l.amount)
			return nil
		}

	}
	l.resultMap[l.index] = newLimitResult(false, l.amount, l.amount)
	return nil
}

// LoadLimiters fetches the limit configs matching this runner's code and type.
func (l *limiterRunner) LoadLimiters() error {
	configs, err := GetLimiters(l.limitCode, l.limitType)
	if err != nil {
		return err
	}
	if configs != nil {
		l.limiters = configs
	}
	return nil
}

// countInPeriod queries the database for the amount this user already
// consumed under limiter r within period p.
func (l *limiterRunner) countInPeriod(r models.LimitConfig, p *models.PeriodResult) (int64, error) {
	if r.LimitType == models.LimitTypeTask.Name() {
		return models.CountTaskAccomplishLogInTaskPeriod(r.LimitCode, l.userId, p)
	}
	if r.LimitType == models.LimitTypeRewardPoint.Name() {
		return models.SumRewardAmountInTaskPeriod(models.RewardTypePoint.Name(), r.LimitCode, l.userId, p)
	}
	return 0, nil
}

// CheckLimit runs every limiter matching (limitCode, limitType) for the
// given user and amount, returning the amount actually permitted. An
// empty reject policy defaults to JustReject.
func CheckLimit(limitCode string, limitType models.LimitType, userId, amount int64, rejectPolicy models.LimiterRejectPolicy) (int64, error) {
	policy := rejectPolicy
	if policy == "" {
		policy = models.JustReject
	}
	runner := newLimiterRunner(limitCode, limitType, userId, amount, policy)
	if err := runner.Run(); err != nil {
		return 0, err
	}
	return runner.minRealAmount, nil
}

// GetLimiters returns limiters of the given type whose LimitCode either
// equals limitCode or is empty (a wildcard applying to all codes).
func GetLimiters(limitCode string, limitType models.LimitType) ([]models.LimitConfig, error) {
	all, err := GetLimitersByLimitType(limitType)
	if err != nil {
		return nil, err
	}
	matched := make([]models.LimitConfig, 0)
	for i := range all {
		if code := all[i].LimitCode; code == "" || code == limitCode {
			matched = append(matched, all[i])
		}
	}
	return matched, nil
}

// GetLimitersByLimitType returns every limit config of the given type,
// serving from the redis cache when possible and otherwise loading from
// the database (caching the result, or an empty marker on a miss).
func GetLimitersByLimitType(limitType models.LimitType) ([]models.LimitConfig, error) {
	redisKey := redis_key.LimitConfig(limitType.Name())
	val, _ := redis_client.Get(redisKey)
	if val != "" {
		if val == redis_key.EMPTY_REDIS_VAL {
			return nil, nil
		}
		limiters := make([]models.LimitConfig, 0)
		// Fix: a corrupt cache entry used to be returned silently as an
		// empty list; now we fall through to the database instead.
		if err := json.Unmarshal([]byte(val), &limiters); err == nil {
			return limiters, nil
		}
		log.Error("GetLimitersByLimitType cached value is corrupt, reloading from db")
	}
	limiters, err := models.GetLimitConfigByLimitType(limitType)
	if err != nil {
		if models.IsErrRecordNotExist(err) {
			redis_client.Setex(redisKey, redis_key.EMPTY_REDIS_VAL, 5*time.Second)
			return nil, nil
		}
		return nil, err
	}
	jsonStr, _ := json.Marshal(limiters)
	redis_client.Setex(redisKey, string(jsonStr), 30*24*time.Hour)

	return limiters, nil
}

// GetLimitersByRelatedIdWithDeleted loads limiters of the given type from
// the database including soft-deleted rows; a missing record is not an error.
func GetLimitersByRelatedIdWithDeleted(limitType models.LimitType) ([]models.LimitConfig, error) {
	limiters, err := models.GetLimitersByRelatedIdWithDeleted(limitType)
	if err == nil {
		return limiters, nil
	}
	if models.IsErrRecordNotExist(err) {
		return nil, nil
	}
	return nil, err
}

+ 54
- 0
services/reward/notify.go View File

@@ -0,0 +1,54 @@
package reward

import (
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/redis/redis_client"
"code.gitea.io/gitea/modules/redis/redis_key"
"code.gitea.io/gitea/modules/timeutil"
"encoding/json"
"fmt"
"time"
)

// NotifyRewardOperation queues a reward-operation notification for the
// user in a redis sorted set scored by the current unix time. Cloudbrain
// task rewards are deliberately not notified.
func NotifyRewardOperation(userId, amount int64, sourceType models.SourceType, rewardType models.RewardType, operateType models.RewardOperateType) {
	if sourceType == models.SourceTypeRunCloudbrainTask {
		return
	}
	payload := models.UserRewardOperationRedis{
		UserId:      userId,
		Amount:      amount,
		RewardType:  rewardType,
		OperateType: operateType,
	}
	b, _ := json.Marshal(&payload)
	redis_client.ZAdd(redis_key.RewardOperateNotification(), string(b), float64(time.Now().Unix()))
}

// GetRewardOperation drains the reward-operation notifications queued
// between since and until (inclusive score range) and returns them.
func GetRewardOperation(since, until timeutil.TimeStamp) []models.UserRewardOperation {
	list, err := redis_client.ZRangeByScore(redis_key.RewardOperateNotification(), float64(since), float64(until))
	if err != nil {
		log.Error("GetRewardOperation ZRangeByScore error. %v", err)
		return nil
	}
	if len(list) == 0 {
		log.Debug("GetRewardOperation list length = 0")
		return nil
	}
	// Fix: the slice was created with make(..., len(list)) and then
	// appended to, prepending len(list) zero-value entries to the result.
	// Allocate capacity only.
	r := make([]models.UserRewardOperation, 0, len(list))
	for _, v := range list {
		t := models.UserRewardOperationRedis{}
		json.Unmarshal([]byte(v), &t)
		r = append(r, models.UserRewardOperation{
			UserId: t.UserId,
			Msg:    v,
		})
	}
	redis_client.ZRemRangeByScore(redis_key.RewardOperateNotification(), float64(since), float64(until))
	return r
}

// GetRewardOperateMsg renders a short display message for a reward
// operation by concatenating the operate verb, the amount, and the
// reward-type label.
func GetRewardOperateMsg(u models.UserRewardOperationRedis) string {
	return u.OperateType.Show() + fmt.Sprint(u.Amount) + u.RewardType.Show()
}

+ 278
- 0
services/reward/operator.go View File

@@ -0,0 +1,278 @@
package reward

import (
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/redis/redis_key"
"code.gitea.io/gitea/modules/redis/redis_lock"
"code.gitea.io/gitea/services/reward/point"
"errors"
"fmt"
"time"
)

// RewardOperatorMap maps a reward type to the operator that applies it.
// NOTE(review): keys are built with fmt.Sprint while GetOperator looks up
// rewardType.Name() — these must yield the same string; verify.
var RewardOperatorMap = map[string]RewardOperator{
	fmt.Sprint(models.RewardTypePoint): new(point.PointOperator),
}

// RewardOperator applies reward operations of one reward type: IsLimited
// pre-checks (and may shrink) the amount, Operate performs the transfer.
type RewardOperator interface {
	IsLimited(ctx *models.RewardOperateContext) error
	Operate(ctx *models.RewardOperateContext) error
}

// Operate performs one reward operation (increase or decrease) described
// by ctx. Idempotency per (requestId, sourceType, operateType) is
// enforced twice: a short-lived distributed lock filters concurrent
// duplicates, and an existing operate record filters replays.
func Operate(ctx *models.RewardOperateContext) error {
	defer func() {
		if err := recover(); err != nil {
			combinedErr := fmt.Errorf("%s\n%s", err, log.Stack(2))
			log.Error("PANIC:%v", combinedErr)
			// NOTE(review): a recovered panic makes this function return
			// nil, so callers cannot tell the operation failed — confirm
			// this is intended.
		}
	}()
	if !checkRewardOperationParam(ctx) {
		log.Error("send reward error,param incorrect")
		return errors.New("param incorrect")
	}
	//add lock
	var rewardLock = redis_lock.NewDistributeLock(redis_key.RewardOperateLock(ctx.RequestId, ctx.SourceType.Name(), ctx.OperateType.Name()))
	isOk, err := rewardLock.Lock(3 * time.Second)
	if err != nil {
		return err
	}
	if !isOk {
		// Another worker holds the lock: treat as a duplicate request.
		log.Info("duplicated reward request,targetUserId=%d requestId=%s", ctx.TargetUserId, ctx.RequestId)
		return nil
	}
	defer rewardLock.UnLock()

	//is handled before?
	isHandled, err := isHandled(ctx.SourceType.Name(), ctx.RequestId, ctx.OperateType.Name())
	if err != nil {
		log.Error("reward is handled error,%v", err)
		return err
	}
	if isHandled {
		log.Info("reward has been handled,ctx=%+v", ctx)
		return nil
	}

	//get operator
	operator := GetOperator(ctx.Reward.Type)
	if operator == nil {
		log.Error("operator of reward type is not exist,ctx=%v", ctx)
		return errors.New("operator of reward type is not exist")
	}

	if ctx.OperateType == models.OperateTypeIncrease {
		//is limited? the check may also shrink ctx.Reward.Amount under a
		//partial-grant policy
		if err := operator.IsLimited(ctx); err != nil {
			log.Info("operator IsLimited, err=%v", err)
			return err
		}
	}

	//new reward operate record
	recordId, err := initRewardOperateRecord(ctx)
	if err != nil {
		log.Error("initRewardOperateRecord error,err=%v", err)
		return err
	}

	// The record's serial number becomes the source id of the account
	// movement performed below.
	ctx.SourceId = recordId

	//operate
	if err := operator.Operate(ctx); err != nil {
		log.Error("operator Operate error,err=%v", err)
		UpdateRewardRecordToFinalStatus(ctx.SourceType.Name(), ctx.RequestId, models.OperateStatusFailed)
		return err
	}

	UpdateRewardRecordToFinalStatus(ctx.SourceType.Name(), ctx.RequestId, models.OperateStatusSucceeded)
	NotifyRewardOperation(ctx.TargetUserId, ctx.Reward.Amount, ctx.SourceType, ctx.Reward.Type, ctx.OperateType)
	return nil
}

// checkRewardOperationParam reports whether the operate context carries
// the minimum required fields (currently: a non-empty reward type).
func checkRewardOperationParam(ctx *models.RewardOperateContext) bool {
	return ctx.Reward.Type != ""
}

// GetOperator returns the operator registered for the reward type, or nil
// when the type has no registered operator.
func GetOperator(rewardType models.RewardType) RewardOperator {
	return RewardOperatorMap[rewardType.Name()]
}

// isHandled reports whether an operate record already exists for the
// (sourceType, requestId, operateType) triple, i.e. the request is a replay.
func isHandled(sourceType string, requestId string, operateType string) (bool, error) {
	_, err := models.GetPointOperateRecordBySourceTypeAndRequestId(sourceType, requestId, operateType)
	if err == nil {
		return true, nil
	}
	// A missing record simply means the request was not handled yet.
	if models.IsErrRecordNotExist(err) {
		return false, nil
	}
	// Fix: previously every cache miss was logged as an error (and real
	// failures were logged twice); only genuine failures are logged now.
	log.Error("GetPointOperateRecordBySourceTypeAndRequestId error. %v", err)
	return false, err
}

// initRewardOperateRecord inserts a new operate record in status
// "operating" and returns its generated serial number.
func initRewardOperateRecord(ctx *models.RewardOperateContext) (string, error) {
	serialNo, err := generateOperateSerialNo()
	if err != nil {
		log.Error("generateOperateSerialNo error. %v", err)
		return "", err
	}
	rec := models.RewardOperateRecord{
		UserId:           ctx.TargetUserId,
		Amount:           ctx.Reward.Amount,
		LossAmount:       ctx.LossAmount,
		RewardType:       ctx.Reward.Type.Name(),
		SourceType:       ctx.SourceType.Name(),
		SourceId:         ctx.SourceId,
		SourceTemplateId: ctx.SourceTemplateId,
		RequestId:        ctx.RequestId,
		OperateType:      ctx.OperateType.Name(),
		Status:           models.OperateStatusOperating,
		Remark:           ctx.Remark,
		Title:            ctx.Title,
		SerialNo:         serialNo,
	}
	if _, err = models.InsertRewardOperateRecord(&rec); err != nil {
		log.Error("InsertRewardOperateRecord error. %v", err)
		return "", err
	}
	return rec.SerialNo, nil
}

// createPeriodicRewardOperateRecord inserts the zero-amount operate
// record that anchors a periodic reward task, returning its serial number.
func createPeriodicRewardOperateRecord(ctx *models.StartPeriodicTaskOpts) (string, error) {
	serialNo, err := generateOperateSerialNo()
	if err != nil {
		log.Error("createPeriodic generateOperateSerialNo error. %v", err)
		return "", err
	}
	rec := models.RewardOperateRecord{
		UserId:      ctx.TargetUserId,
		Amount:      0,
		RewardType:  ctx.RewardType.Name(),
		SourceType:  ctx.SourceType.Name(),
		SourceId:    ctx.SourceId,
		RequestId:   ctx.RequestId,
		OperateType: ctx.OperateType.Name(),
		Status:      models.OperateStatusOperating,
		Remark:      ctx.Remark,
		Title:       ctx.Title,
		SerialNo:    serialNo,
	}
	if _, err = models.InsertRewardOperateRecord(&rec); err != nil {
		log.Error("createPeriodic InsertRewardOperateRecord error. %v", err)
		return "", err
	}
	return rec.SerialNo, nil
}

// UpdateRewardRecordToFinalStatus moves the operate record identified by
// (sourceType, requestId) into its final status.
func UpdateRewardRecordToFinalStatus(sourceType, requestId, newStatus string) error {
	if _, err := models.UpdateRewardRecordToFinalStatus(sourceType, requestId, newStatus); err != nil {
		log.Error("UpdateRewardRecord UpdateRewardRecordToFinalStatus error. %v", err)
		return err
	}
	return nil
}

// GetPeriodicTask returns the periodic task for sourceType/sourceId once
// its operate record exists; (nil, nil) when no operate record has been
// created yet for the request.
func GetPeriodicTask(sourceType models.SourceType, sourceId, requestId string, operateType models.RewardOperateType) (*models.RewardPeriodicTask, error) {
	_, err := models.GetPointOperateRecordBySourceTypeAndRequestId(sourceType.Name(), requestId, operateType.Name())
	if err != nil {
		if models.IsErrRecordNotExist(err) {
			return nil, nil
		}
		log.Error("GetPointOperateRecordBySourceTypeAndRequestId error,%v", err)
		return nil, err
	}
	task, err := models.GetPeriodicTaskBySourceIdAndType(sourceType, sourceId, operateType)
	if err != nil {
		log.Error("GetPeriodicTaskBySourceIdAndType error,%v", err)
		return nil, err
	}
	return task, nil
}

// StartPeriodicTask idempotently creates (or returns the existing)
// periodic reward task for opts. Duplicate requests are filtered by a
// distributed lock keyed on the request id and by the existing operate
// record; (nil, nil) means the request was treated as a duplicate.
func StartPeriodicTask(opts *models.StartPeriodicTaskOpts) (*models.RewardPeriodicTask, error) {
	//add lock
	var rewardLock = redis_lock.NewDistributeLock(redis_key.RewardOperateLock(opts.RequestId, opts.SourceType.Name(), opts.OperateType.Name()))
	isOk, err := rewardLock.Lock(3 * time.Second)
	if err != nil {
		// Fix: the lock error was previously discarded, which could make
		// a redis failure look like a duplicate request.
		log.Error("StartPeriodicTask get lock error. %v", err)
		return nil, err
	}
	if !isOk {
		log.Info("duplicated operate request,targetUserId=%d requestId=%s", opts.TargetUserId, opts.RequestId)
		return nil, nil
	}
	defer rewardLock.UnLock()

	r, err := GetPeriodicTask(opts.SourceType, opts.SourceId, opts.RequestId, opts.OperateType)
	if err != nil {
		return nil, err
	}

	if r != nil {
		return r, nil
	}

	//new reward operate record
	recordId, err := createPeriodicRewardOperateRecord(opts)
	if err != nil {
		log.Error("StartAndGetPeriodicTask createPeriodicRewardOperateRecord error. %v", err)
		return nil, err
	}

	if err = NewRewardPeriodicTask(recordId, opts); err != nil {
		log.Error("StartAndGetPeriodicTask NewRewardPeriodicTask error. %v", err)
		UpdateRewardRecordToFinalStatus(opts.SourceType.Name(), opts.RequestId, models.OperateStatusFailed)
		return nil, err
	}

	task, err := models.GetPeriodicTaskBySourceIdAndType(opts.SourceType, opts.SourceId, opts.OperateType)
	if err != nil {
		log.Error("GetPeriodicTaskBySourceIdAndType error,%v", err)
		return nil, err
	}
	return task, nil
}

// StopPeriodicTaskAsyn stops the matching periodic task in a background
// goroutine; errors are only logged inside StopPeriodicTask.
func StopPeriodicTaskAsyn(sourceType models.SourceType, sourceId string, operateType models.RewardOperateType) {
	go StopPeriodicTask(sourceType, sourceId, operateType)
}

// StopPeriodicTask finalizes the periodic task bound to the given source:
// it settles any deductions still due up to now, then marks the task
// finished. Panics are recovered so background callers never crash.
func StopPeriodicTask(sourceType models.SourceType, sourceId string, operateType models.RewardOperateType) error {
	defer func() {
		if err := recover(); err != nil {
			combinedErr := fmt.Errorf("%s\n%s", err, log.Stack(2))
			log.Error("PANIC:%v", combinedErr)
		}
	}()
	pt, err := models.GetPeriodicTaskBySourceIdAndType(sourceType, sourceId, operateType)
	if err != nil {
		log.Error("StopPeriodicTask. GetPeriodicTaskBySourceIdAndType error. %v", err)
		return err
	}
	if pt == nil {
		log.Info("Periodic task is not exist")
		return nil
	}
	if pt.Status == models.PeriodicTaskStatusFinished {
		log.Info("Periodic task is finished")
		return nil
	}
	stopAt := time.Now()
	RunRewardTask(*pt, stopAt)
	return models.StopPeriodicTask(pt.ID, pt.OperateSerialNo, stopAt)
}

// generateOperateSerialNo produces a fresh serial number for an operate
// record via the redis-backed counter.
func generateOperateSerialNo() (string, error) {
	serialNo, err := GetSerialNoByRedis()
	if err != nil {
		log.Error("generateOperateSerialNo error. %v", err)
		return "", err
	}
	return serialNo, nil
}

+ 131
- 0
services/reward/period_task.go View File

@@ -0,0 +1,131 @@
package reward

import (
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/redis/redis_key"
"code.gitea.io/gitea/modules/redis/redis_lock"
"code.gitea.io/gitea/modules/timeutil"
"code.gitea.io/gitea/routers/repo"
"errors"
"fmt"
"time"
)

// NewRewardPeriodicTask persists a running periodic reward task bound to
// the given operate record, with its first execution delayed by opts.Delay.
func NewRewardPeriodicTask(operateRecordId string, opts *models.StartPeriodicTaskOpts) error {
	task := models.RewardPeriodicTask{
		DelaySeconds:    int64(opts.Delay.Seconds()),
		IntervalSeconds: int64(opts.Interval.Seconds()),
		Amount:          int64(opts.UnitAmount),
		OperateSerialNo: operateRecordId,
		Status:          models.PeriodicTaskStatusRunning,
		NextExecuteTime: timeutil.TimeStamp(opts.StartTime.Add(opts.Delay).Unix()),
	}
	_, err := models.InsertPeriodicTask(&task)
	return err
}

// StartRewardTask runs every periodic reward task that is currently due,
// recovering from panics so the cron caller never crashes.
func StartRewardTask() {
	defer func() {
		if err := recover(); err != nil {
			combinedErr := fmt.Errorf("%s\n%s", err, log.Stack(2))
			log.Error("PANIC:%v", combinedErr)
		}
	}()
	log.Debug("try to run reward tasks")
	now := time.Now()
	taskList, err := models.GetRunningRewardTask(now)
	if err != nil {
		log.Error("GetRunningRewardTask error. %v", err)
		return
	}
	// len() of a nil slice is 0, so the explicit nil check was redundant.
	if len(taskList) == 0 {
		log.Debug("No GetRunningRewardTask need handled")
		return
	}
	for _, t := range taskList {
		RunRewardTask(t, now)
	}
}

// RunRewardTask settles all executions of one periodic reward task that
// are due at time `now`, applying t.Amount once per elapsed interval.
// A per-task distributed lock prevents concurrent settlement.
func RunRewardTask(t models.RewardPeriodicTask, now time.Time) error {
	lock := redis_lock.NewDistributeLock(redis_key.RewardTaskRunningLock(t.ID))
	isOk, _ := lock.LockWithWait(5*time.Second, 5*time.Second)
	if !isOk {
		log.Error("get RewardTaskRunningLock failed,t=%+v", t)
		return errors.New("get RewardTaskRunningLock failed")
	}
	defer lock.UnLock()
	record, err := models.GetPointOperateRecordBySerialNo(t.OperateSerialNo)
	if err != nil {
		log.Error("RunRewardTask. GetPointOperateRecordBySerialNo error. %v", err)
		return errors.New("GetPointOperateRecordBySerialNo error")
	}
	if record.Status != models.OperateStatusOperating {
		// The owning operate record is already settled; nothing to do.
		log.Info("RunRewardTask. operate record is finished,record=%+v", record)
		return nil
	}
	n, _ := countExecuteTimes(t, now)
	if n == 0 {
		log.Info("countExecuteTimes result is 0")
		return nil
	}

	//get operator
	operator := GetOperator(models.GetRewardTypeInstance(record.RewardType))
	if operator == nil {
		log.Error("RunRewardTask. operator of reward type is not exist")
		return errors.New("operator of reward type is not exist")
	}
	nextTime := timeutil.TimeStamp(int64(t.NextExecuteTime) + t.IntervalSeconds)
	log.Debug("RunRewardTask n=%d", n)
	// Apply the operation once per due interval, advancing the task's
	// success count / next-execute time after each step so a crash
	// mid-loop does not re-charge intervals that already settled.
	for i := 1; int64(i) <= n; i++ {
		log.Debug("operator.Operate i=%d n=%d", i, n)
		err = operator.Operate(&models.RewardOperateContext{
			SourceType: models.SourceTypeRunCloudbrainTask,
			SourceId:   t.OperateSerialNo,
			Reward: models.Reward{
				Amount: t.Amount,
				Type:   models.GetRewardTypeInstance(record.RewardType),
			},
			TargetUserId: record.UserId,
			OperateType:  models.GetRewardOperateTypeInstance(record.OperateType),
		})
		if err != nil {
			log.Error("RunRewardTask.operator operate error.%v", err)
			if models.IsErrInsufficientPointsBalance(err) {
				// Balance exhausted: stop the paying cloudbrain job and
				// close the periodic task.
				task, err := models.GetCloudbrainByID(record.SourceId)
				if err != nil {
					log.Error("RunRewardTask GetCloudbrainByID error. %v", err)
					return err
				}
				repo.StopJobs([]*models.Cloudbrain{task})
				models.StopPeriodicTask(task.ID, t.OperateSerialNo, time.Now())
				return nil
			}
			// NOTE(review): other operate errors abort the loop but still
			// return nil — confirm skipping the remaining intervals
			// silently is intended.
			return nil
		}
		models.IncrRewardTaskSuccessCount(t, 1, nextTime)
		nextTime = timeutil.TimeStamp(int64(nextTime) + t.IntervalSeconds)
	}
	return nil

}

// countExecuteTimes computes how many executions of the periodic task are
// due between t.NextExecuteTime and now, together with the next execute
// time remaining after those executions. Returns (0, 0) when the task is
// not due yet.
// NOTE(review): when now falls exactly on an interval boundary
// (diff%interval == 0) the count is diff/interval — one fewer than the
// rounded-up branch yields for the same elapsed span, and diff == 0
// produces zero executions even though the task is due. Confirm this
// boundary behavior is intended.
func countExecuteTimes(t models.RewardPeriodicTask, now time.Time) (int64, timeutil.TimeStamp) {
	interval := t.IntervalSeconds
	nextTime := int64(t.NextExecuteTime)
	if nextTime > now.Unix() {
		return 0, 0
	}
	diff := now.Unix() - nextTime
	var n int64
	if diff%interval == 0 {
		n = diff / interval
	} else {
		n = diff/interval + 1
	}

	newNextTime := timeutil.TimeStamp(nextTime + n*interval)
	return n, newNextTime
}

+ 150
- 0
services/reward/point/account/point_account.go View File

@@ -0,0 +1,150 @@
package account

import (
"bytes"
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/redis/redis_client"
"code.gitea.io/gitea/modules/redis/redis_key"
"code.gitea.io/gitea/modules/redis/redis_lock"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/util"
"encoding/json"
"strings"
"time"
)

// GetAccount returns the point account of userId, serving from the redis
// cache when possible. A missing database row triggers lazy creation via
// InitAccount.
func GetAccount(userId int64) (*models.PointAccount, error) {
	cacheKey := redis_key.PointAccountInfo(userId)
	if cached, _ := redis_client.Get(cacheKey); cached != "" {
		account := &models.PointAccount{}
		json.Unmarshal([]byte(cached), account)
		return account, nil
	}
	account, err := models.GetAccountByUserId(userId)
	if err != nil {
		if models.IsErrRecordNotExist(err) {
			created, err := InitAccount(userId)
			if err != nil {
				log.Error("InitAccount error,err=%v", err)
				return nil, err
			}
			return created, nil
		}
		log.Error("GetAccountByUserId error,err=%v", err)
		return nil, err
	}
	jsonStr, _ := json.Marshal(account)
	redis_client.Setex(cacheKey, string(jsonStr), 24*time.Hour)
	return account, nil
}

// InitAccount creates the point account for userId exactly once under a
// distributed lock and returns the freshly loaded account. Returns
// (nil, nil) when the lock could not be acquired in time.
func InitAccount(userId int64) (*models.PointAccount, error) {
	lock := redis_lock.NewDistributeLock(redis_key.PointAccountInitLock(userId))
	isOk, err := lock.LockWithWait(3*time.Second, 3*time.Second)
	if err != nil {
		log.Error("PointAccountInitLock error,err=%v", err)
		return nil, err
	}
	if !isOk {
		return nil, nil
	}
	defer lock.UnLock()
	// Double-check under the lock: another worker may have created it.
	account, _ := models.GetAccountByUserId(userId)
	if account != nil {
		return account, nil
	}
	// Fix: the insert error was previously discarded, surfacing later as
	// a confusing "record not exist" from the re-read below.
	if _, err := models.InsertAccount(&models.PointAccount{
		Balance:       0,
		TotalEarned:   0,
		TotalConsumed: 0,
		UserId:        userId,
		Status:        models.PointAccountNormal,
		Version:       0,
		AccountCode:   util.UUID(),
	}); err != nil {
		log.Error("InsertAccount error,err=%v", err)
		return nil, err
	}
	return models.GetAccountByUserId(userId)
}

//IsPointBalanceEnough check whether the user's point balance is bigger than task unit price
func IsPointBalanceEnough(targetUserId int64, unitPrice int) bool {
	// Free tasks and a disabled pay switch always pass.
	if !setting.CloudBrainPaySwitch || unitPrice == 0 {
		return true
	}
	acc, err := GetAccount(targetUserId)
	if err != nil {
		log.Error("IsPointBalanceEnough GetAccount error,err=%v", err)
		return false
	}
	return acc.Balance >= int64(unitPrice)
}

// SearchPointAccount pages users whose name/email matches opt.Keyword and
// joins each with their point account, defaulting balances to zero for
// users that have no account row yet.
func SearchPointAccount(opt models.SearchPointAccountOpts) (*models.SearchPointAccountResponse, error) {
	var result = &models.SearchPointAccountResponse{
		Records:  make([]*models.UserPointAccount, 0),
		PageSize: opt.PageSize,
		Page:     opt.Page,
		Total:    0,
	}

	// NOTE(review): the user query is fixed at PageSize 20 while the
	// response echoes opt.PageSize — confirm this mismatch is intended.
	userSearch := &models.SearchUserOptions{
		Type: models.UserTypeIndividual,
		ListOptions: models.ListOptions{
			PageSize: 20,
		},
		SearchByEmail: true,
		OrderBy:       models.SearchOrderByAlphabetically,
	}

	userSearch.Page = opt.Page
	if userSearch.Page <= 0 {
		userSearch.Page = 1
	}
	userSearch.Keyword = strings.Trim(opt.Keyword, " ")
	// Keywords containing NUL bytes are skipped; an empty keyword lists all.
	if len(userSearch.Keyword) == 0 || isKeywordValid(userSearch.Keyword) {
		users, count, err := models.SearchUsers(userSearch)
		if err != nil {
			log.Error("SearchPointAccount SearchUsers error.%v", err)
			return nil, err
		}
		userIds := make([]int64, 0)
		for _, v := range users {
			userIds = append(userIds, v.ID)
		}
		accountMap, err := models.GetPointAccountMapByUserIds(userIds)
		if err != nil {
			return nil, err
		}

		records := make([]*models.UserPointAccount, 0)
		for _, v := range users {
			// Default to zero balances; filled in when an account exists.
			upa := &models.UserPointAccount{
				UserId:        v.ID,
				UserName:      v.Name,
				Email:         v.Email,
				Balance:       0,
				TotalEarned:   0,
				TotalConsumed: 0,
			}
			a := accountMap[v.ID]
			if a != nil {
				upa.Balance = a.Balance
				upa.TotalConsumed = a.TotalConsumed
				upa.TotalEarned = a.TotalEarned
			}
			records = append(records, upa)
		}
		result.Records = records
		result.Total = count
	}
	return result, nil
}

func isKeywordValid(keyword string) bool {
return !bytes.Contains([]byte(keyword), []byte{0x00})
}

+ 65
- 0
services/reward/point/point_operate.go View File

@@ -0,0 +1,65 @@
package point

import (
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/redis/redis_client"
"code.gitea.io/gitea/modules/redis/redis_key"
"code.gitea.io/gitea/modules/redis/redis_lock"
"code.gitea.io/gitea/services/reward/limiter"
"code.gitea.io/gitea/services/reward/point/account"
"errors"
"time"
)

// PointOperator implements the reward operator for point-type rewards.
type PointOperator struct {
}

// IsLimited checks the user's point-reward limiters and, when the reject
// policy allows partial grants, shrinks ctx.Reward.Amount to the granted
// amount, recording the difference in ctx.LossAmount.
func (operator *PointOperator) IsLimited(ctx *models.RewardOperateContext) error {
	granted, err := limiter.CheckLimit(ctx.SourceType.Name(), models.LimitTypeRewardPoint, ctx.TargetUserId, ctx.Reward.Amount, ctx.RejectPolicy)
	if err != nil {
		log.Error("PointOperator IsLimited error,err=%v", err)
		return err
	}
	if granted < ctx.Reward.Amount {
		ctx.LossAmount = ctx.Reward.Amount - granted
		ctx.Reward.Amount = granted
	}
	return nil
}

// Operate applies a point increase or decrease to the target user's
// account under a per-account distributed lock, then invalidates the
// cached account info. Decreases fail with ErrInsufficientPointsBalance
// when the balance is too low and negative balances are not permitted.
func (operator *PointOperator) Operate(ctx *models.RewardOperateContext) error {
	lock := redis_lock.NewDistributeLock(redis_key.PointAccountOperateLock(ctx.TargetUserId))
	isOk, err := lock.LockWithWait(3*time.Second, 3*time.Second)
	if err != nil {
		log.Error("Get PointAccountOperateLock error,err=%v", err)
		return err
	}
	if !isOk {
		log.Error("Get account operate lock failed,ctx=%v", ctx)
		return errors.New("Get account operate lock failed")
	}
	defer lock.UnLock()
	na, err := account.GetAccount(ctx.TargetUserId)
	if err != nil || na == nil {
		// Fix: log message previously read "... error error".
		log.Error("operator get account error,err=%v", err)
		return errors.New("get account error")
	}
	if ctx.OperateType == models.OperateTypeIncrease {
		err = na.Increase(ctx.Reward.Amount, ctx.SourceId)
	} else if ctx.OperateType == models.OperateTypeDecrease {
		if !ctx.PermittedNegative && na.Balance < ctx.Reward.Amount {
			log.Info("account balance is not enough,ctx=%v", ctx)
			return models.ErrInsufficientPointsBalance{}
		}
		err = na.Decrease(ctx.Reward.Amount, ctx.SourceId)
	}
	if err != nil {
		log.Error("operate account balance error,err=%v", err)
		return err
	}
	// Evict the cached account so the next read sees the new balance.
	redis_client.Del(redis_key.PointAccountInfo(ctx.TargetUserId))
	return nil
}

+ 47
- 0
services/reward/record.go View File

@@ -0,0 +1,47 @@
package reward

import (
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/log"
)

// RecordResponse is one page of reward operate records for display.
type RecordResponse struct {
	Records  []*models.RewardOperateRecordShow // records on the current page
	Total    int64                             // total records matching the query
	PageSize int                               // requested page size
	Page     int                               // requested page number
}

// GetRewardRecordList queries the reward operate records described by
// opts, using the admin view when opts.IsAdmin is set.
func GetRewardRecordList(opts *models.RewardRecordListOpts) (*RecordResponse, error) {
	var (
		list  models.RewardRecordShowList
		total int64
		err   error
	)
	if opts.IsAdmin {
		list, total, err = models.GetAdminRewardRecordShowList(opts)
	} else {
		list, total, err = models.GetRewardRecordShowList(opts)
	}
	if err != nil {
		log.Error("GetRewardRecordList error. %v", err)
		return nil, err
	}
	resp := &RecordResponse{Total: total, Page: opts.Page, PageSize: opts.PageSize}
	if len(list) == 0 {
		resp.Records = make([]*models.RewardOperateRecordShow, 0)
	} else {
		resp.Records = list
	}
	return resp, nil
}

// handleRecordResponse post-processes a record list for display: admins
// get the user name filled in, regular users get the AI center hidden.
func handleRecordResponse(opts *models.RewardRecordListOpts, list models.RewardRecordShowList) {
	if opts.IsAdmin {
		for _, record := range list {
			record.UserName = opts.UserName
		}
		return
	}
	for _, record := range list {
		if record.Cloudbrain != nil {
			record.Cloudbrain.AiCenter = ""
		}
	}
}

+ 28
- 0
services/reward/serial.go View File

@@ -0,0 +1,28 @@
package reward

import (
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/redis/redis_client"
"code.gitea.io/gitea/modules/redis/redis_key"
"fmt"
"math/rand"
"time"
)

// GetSerialNoByRedis builds a serial number from the current minute, a
// redis-backed counter and one random digit. The counter key expires
// after two minutes, so numbering restarts each minute window.
func GetSerialNoByRedis() (string, error) {
	now := time.Now()
	step := int64(rand.Intn(3)) + 1
	n, err := redis_client.IncrBy(redis_key.RewardSerialCounter(now), step)
	if err != nil {
		log.Error("GetSerialNoByRedis RewardSerialCounter error. %v", err)
		return "", err
	}
	if n == step {
		// First increment in this window: set the key's lifetime.
		redis_client.Expire(redis_key.RewardSerialCounter(now), 2*time.Minute)
	}
	prefix := now.Format("200601021504")
	suffix := fmt.Sprint(rand.Intn(10))
	//when the counter n exceeds 1000, the length of the serial number will become longer
	if n >= 1000 {
		return prefix + fmt.Sprintf("%d", n) + suffix, nil
	}
	return prefix + fmt.Sprintf("%03d", n) + suffix, nil
}

+ 14
- 12
services/socketwrap/clientManager.go View File

@@ -10,7 +10,7 @@ import (
"github.com/elliotchance/orderedmap"
)

var opTypes = []int{1, 2, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 17, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33}
var opTypes = []int{1, 2, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 17, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 35}

type ClientsManager struct {
Clients *orderedmap.OrderedMap
@@ -107,16 +107,18 @@ func initActionQueue() {

func filterUserPrivateInfo(action *models.Action) {
action.Comment = nil
action.ActUser.Email = ""
action.ActUser.Passwd = ""
action.ActUser.PasswdHashAlgo = ""
action.ActUser.PrivateKey = ""
action.ActUser.PublicKey = ""
action.ActUser.Salt = ""
action.ActUser.FullName = ""
action.ActUser.AvatarEmail = ""
action.ActUser.IsAdmin = false
action.ActUser.EmailNotificationsPreference = ""
action.ActUser.IsOperator = false
if action.ActUser != nil {
action.ActUser.Email = ""
action.ActUser.Passwd = ""
action.ActUser.PasswdHashAlgo = ""
action.ActUser.PrivateKey = ""
action.ActUser.PublicKey = ""
action.ActUser.Salt = ""
action.ActUser.FullName = ""
action.ActUser.AvatarEmail = ""
action.ActUser.IsAdmin = false
action.ActUser.EmailNotificationsPreference = ""
action.ActUser.IsOperator = false
}

}

+ 50
- 0
services/task/period/handler.go View File

@@ -0,0 +1,50 @@
package period

import (
"code.gitea.io/gitea/models"
"errors"
"time"
)

// PeriodHandlerMap maps a refresh-rate name to the handler that computes
// its current period window.
var PeriodHandlerMap = map[string]PeriodHandler{
	models.PeriodNotCycle: new(NoCycleHandler),
	models.PeriodDaily:    new(DailyHandler),
}

// PeriodHandler computes the current period window for one refresh rate;
// a nil result means the rate has no bounded period.
type PeriodHandler interface {
	GetCurrentPeriod() *models.PeriodResult
}

// NoCycleHandler handles configs that never refresh: there is no bounded
// period, so the current period is always nil.
type NoCycleHandler struct {
}

// GetCurrentPeriod returns nil because a non-cyclic limit has no period.
func (l *NoCycleHandler) GetCurrentPeriod() *models.PeriodResult {
	return nil
}

// DailyHandler handles configs refreshed once per calendar day.
type DailyHandler struct {
}

// GetCurrentPeriod returns today's window in local time — midnight to the
// following midnight — plus the time remaining until it ends.
func (l *DailyHandler) GetCurrentPeriod() *models.PeriodResult {
	now := time.Now()
	dayStart := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location())
	dayEnd := dayStart.Add(24 * time.Hour)
	return &models.PeriodResult{
		StartTime: dayStart,
		EndTime:   dayEnd,
		LeftTime:  dayEnd.Sub(now),
	}
}

// getPeriodHandler resolves the handler for a refresh-rate name, or nil
// when the rate is unknown. (Fixed parameter-name typo "refreshRateype".)
func getPeriodHandler(refreshRateType string) PeriodHandler {
	return PeriodHandlerMap[refreshRateType]
}

// GetPeriod returns the current period for the given refresh rate; an
// unknown rate is reported as a configuration error.
func GetPeriod(refreshRate string) (*models.PeriodResult, error) {
	h := getPeriodHandler(refreshRate)
	if h == nil {
		return nil, errors.New("task config incorrect")
	}
	return h.GetCurrentPeriod(), nil
}

+ 163
- 0
services/task/task.go View File

@@ -0,0 +1,163 @@
package task

import (
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/services/reward"
"code.gitea.io/gitea/services/reward/limiter"
"fmt"
"strconv"
"strings"
)

// Accomplish maps a user action onto its reward task type and, when the
// action qualifies, hands one or more derived actions to the asynchronous
// reward pipeline. Panics are recovered and logged.
func Accomplish(action models.Action) {
	defer func() {
		if err := recover(); err != nil {
			combinedErr := fmt.Errorf("%s\n%s", err, log.Stack(2))
			log.Error("PANIC:%v", combinedErr)
		}
	}()
	taskType := models.GetTaskTypeFromAction(action.OpType)
	if taskType == "" {
		log.Info("Accomplish finished.taskType is not exist.action.ID=%d", action.ID)
		return
	}
	actions := make([]models.Action, 0)
	actions = append(actions, action)
	switch taskType {
	//only creating public repo can be rewarded
	case models.TaskCreatePublicRepo:
		if action.Repo.IsPrivate {
			return
		}
	//only creating public image can be rewarded
	case models.TaskCreateImage:
		if action.IsPrivate {
			return
		}
	case models.TaskBindWechat:
		n, err := models.CountWechatBindLog(action.Content, models.WECHAT_BIND)
		if err != nil {
			log.Error("CountWechatBindLog error when accomplish task,err=%v", err)
			return
		}
		//if wechatOpenId has been bound before,the action can not get reward
		if n > 1 && models.IsWechatOpenIdRewarded(action.Content) {

			log.Debug("the wechat account has been bound before,wechatOpenId = %s", action.Content)
			return
		}
	case models.TaskDatasetRecommended:
		// Content is "<datasetId>|...". Every contributor of the dataset
		// except the recommending user also receives a derived action.
		datasetIdStr := strings.Split(action.Content, "|")[0]
		datasetId, _ := strconv.ParseInt(datasetIdStr, 10, 64)
		users, err := models.GetAllDatasetContributorByDatasetId(datasetId)
		if err != nil {
			return
		}
		for _, user := range users {
			if user.ID == action.ActUserID {
				continue
			}
			actions = append(actions, models.Action{
				ID:        action.ID,
				OpType:    models.ActionDatasetRecommended,
				ActUserID: action.ActUserID,
				UserID:    user.ID,
				RepoID:    action.RepoID,
				Content:   action.Content,
			})
		}

	}
	batchAccomplish(taskType, actions...)
}

// batchAccomplish dispatches every action to accomplish on its own goroutine.
// It does not wait for completion; failures are only logged inside accomplish.
func batchAccomplish(taskType models.TaskType, actions ...models.Action) {
	for i := range actions {
		go accomplish(actions[i], taskType)
	}
}

// accomplish processes one task completion for one user: it validates the
// user, loads the task config, enforces the per-period limit, records a
// TaskAccomplishLog row, and finally issues the point reward. Steps run in
// this exact order so the accomplish log exists before the reward operation.
// The returned error is informational only — callers run this in a goroutine
// and ignore it.
func accomplish(action models.Action, taskType models.TaskType) error {
	// Recover so a panic never kills the worker goroutine silently.
	defer func() {
		if err := recover(); err != nil {
			combinedErr := fmt.Errorf("%s\n%s", err, log.Stack(2))
			log.Error("PANIC:%v", combinedErr)
		}
	}()
	log.Info("accomplish start. actionId=%d userId= %d", action.ID, action.UserID)
	userId := action.UserID
	// Skip invalid users and organizations — only real users earn points.
	if !isUserAvailable(userId) {
		return nil
	}

	//get task config
	config, err := GetTaskConfig(string(taskType))
	if err != nil {
		log.Error("GetTaskConfig error,%v", err)
		return err
	}
	if config == nil {
		// No config means the task is not enabled; silently succeed.
		log.Info("task config not exist,userId=%d taskType=%s", userId, taskType)
		return nil
	}

	//is limited?
	// JustReject here: if the limit is reached, drop the whole accomplishment
	// rather than partially filling it.
	if isLimited(userId, config, models.JustReject) {
		log.Info("task accomplish maximum times are reached,userId=%d taskType=%s", userId, taskType)
		return nil
	}

	//add log
	_, err = models.InsertTaskAccomplishLog(&models.TaskAccomplishLog{
		ConfigId: config.ID,
		TaskCode: config.TaskCode,
		UserId:   userId,
		ActionId: action.ID,
	})
	if err != nil {
		log.Error("InsertTaskAccomplishLog error,%v", err)
		return err
	}

	//reward
	// RequestId ties the reward to (action, user) so retries are idempotent;
	// FillUp lets a partially available reward be granted up to the limit.
	reward.Operate(&models.RewardOperateContext{
		SourceType:       models.SourceTypeAccomplishTask,
		SourceId:         fmt.Sprint(action.ID),
		SourceTemplateId: string(taskType),
		Title:            config.Title,
		Reward: models.Reward{
			Amount: config.AwardAmount,
			Type:   models.GetRewardTypeInstance(config.AwardType),
		},
		TargetUserId: userId,
		RequestId:    fmt.Sprintf("%d_%d", action.ID, userId),
		OperateType:  models.OperateTypeIncrease,
		RejectPolicy: models.FillUp,
	})
	log.Debug("accomplish success,action=%v", action)
	return nil
}

// isLimited reports whether the user has reached the accomplish limit for
// this task. A limiter error is treated conservatively as "limited" so a
// broken limiter can never over-grant rewards.
func isLimited(userId int64, config *models.TaskConfig, rejectPolicy models.LimiterRejectPolicy) bool {
	_, err := limiter.CheckLimit(config.TaskCode, models.LimitTypeTask, userId, 1, rejectPolicy)
	if err != nil {
		log.Error(" isLimited CheckLimit error. %v", err)
		return true
	}
	return false
}

// isUserAvailable reports whether userId refers to an existing, non-organization
// user account — the only kind of account eligible for task rewards.
func isUserAvailable(userId int64) bool {
	if userId <= 0 {
		return false
	}
	u, err := models.GetUserByID(userId)
	if err != nil || u == nil {
		return false
	}
	return !u.IsOrganization()
}

Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save