diff --git a/custom/conf/app.ini.sample b/custom/conf/app.ini.sample
index 8bc971f2c..d294c8823 100755
--- a/custom/conf/app.ini.sample
+++ b/custom/conf/app.ini.sample
@@ -1059,50 +1059,50 @@ RESULT_BACKEND = redis://localhost:6379
 [cloudbrain]
 USERNAME =
 PASSWORD =
-REST_SERVER_HOST = http://192.168.202.73
-JOB_PATH = /datasets/minio/data/opendata/jobs/
-DEBUG_SERVER_HOST = http://192.168.202.73/
+REST_SERVER_HOST =
+JOB_PATH =
+DEBUG_SERVER_HOST =
 ; cloudbrain visit opendata
-USER = cW4cMtH24eoWPE7X
-PWD = 4BPmgvK2hb2Eywwyp4YZRY4B7yQf4DA.C
-GPU_TYPE_DEFAULT = openidebug
-GPU_TYPES = {"gpu_type":[{"id":1,"queue":"openidebug","value":"T4"},{"id":2,"queue":"openidgx","value":"V100"}]}
+USER =
+PWD =
+GPU_TYPE_DEFAULT =
+GPU_TYPES =
 [benchmark]
-ENABLED = true
-BENCHMARKCODE = https://yangzhx:justfortest123@git.openi.org.cn/yangzhx/detection_benchmark_script.git
-HOST = http://192.168.202.90:3366/
+ENABLED =
+BENCHMARKCODE =
+HOST =
 [snn4imagenet]
-ENABLED = true
-SNN4IMAGENETCODE = https://yult:eh2Ten4iLYjFkbj@git.openi.org.cn/ylt/snn4imagenet.git
-HOST = http://192.168.202.90:3366/
+ENABLED =
+SNN4IMAGENETCODE =
+HOST =
 [decompress]
-HOST = http://192.168.207.34:39987
-USER = cW4cMtH24eoWPE7X
-PASSWORD = 4BPmgvK2hb2Eywwyp4YZRY4B7yQf4DAC
+HOST =
+USER =
+PASSWORD =
 [blockchain]
-HOST = http://192.168.207.84:3002/
-COMMIT_VALID_DATE = 2021-01-15
+HOST =
+COMMIT_VALID_DATE =
 [obs]
-ENDPOINT = https://obs.cn-south-222.ai.pcl.cn
-ACCESS_KEY_ID = FDP3LRMHLB9S77VWEHE3
-SECRET_ACCESS_KEY = LyM82Wk80pgjhs2z7AdDcsdpCWhbsJtSzQ7hkESN
-BUCKET = testopendata
-LOCATION = cn-south-222
-BASE_PATH = attachment/
+ENDPOINT =
+ACCESS_KEY_ID =
+SECRET_ACCESS_KEY =
+BUCKET =
+LOCATION =
+BASE_PATH =
 [modelarts]
-ORGANIZATION = modelarts
-ENDPOINT = https://modelarts.cn-south-222.ai.pcl.cn
-PROJECT_ID = edfccf24aace4e17a56da6bcbb55a5aa
-PROJECT_NAME = cn-south-222_test
-USERNAME = test1
-PASSWORD = Qizhi@test.
-DOMAIN = cn-south-222
+ORGANIZATION =
+ENDPOINT =
+PROJECT_ID =
+PROJECT_NAME =
+USERNAME =
+PASSWORD =
+DOMAIN =
 [radar_map]
 impact=0.3
diff --git a/models/ai_model_manage.go b/models/ai_model_manage.go
new file mode 100644
index 000000000..af96444ac
--- /dev/null
+++ b/models/ai_model_manage.go
@@ -0,0 +1,203 @@
+package models
+
+import (
+	"fmt"
+
+	"code.gitea.io/gitea/modules/log"
+	"code.gitea.io/gitea/modules/setting"
+	"code.gitea.io/gitea/modules/timeutil"
+	"xorm.io/builder"
+	"xorm.io/xorm"
+)
+
+type AiModelManage struct {
+	ID                string             `xorm:"pk"`
+	Name              string             `xorm:"NOT NULL"`
+	Version           string             `xorm:"NOT NULL"`
+	VersionCount      int                `xorm:"NOT NULL DEFAULT 0"`
+	New               int                `xorm:"NOT NULL"`
+	Type              int                `xorm:"NOT NULL"`
+	Size              int64              `xorm:"NOT NULL"`
+	Description       string             `xorm:"varchar(2000)"`
+	Label             string             `xorm:"varchar(1000)"`
+	Path              string             `xorm:"varchar(400) NOT NULL"`
+	DownloadCount     int                `xorm:"NOT NULL DEFAULT 0"`
+	Engine            int64              `xorm:"NOT NULL DEFAULT 0"`
+	Status            int                `xorm:"NOT NULL DEFAULT 0"`
+	Accuracy          string             `xorm:"varchar(1000)"`
+	AttachmentId      string             `xorm:"NULL"`
+	RepoId            int64              `xorm:"NULL"`
+	CodeBranch        string             `xorm:"varchar(400) NULL"`
+	CodeCommitID      string             `xorm:"NULL"`
+	UserId            int64              `xorm:"NOT NULL"`
+	UserName          string             `xorm:"NULL"`
+	UserRelAvatarLink string             `xorm:"NULL"`
+	TrainTaskInfo     string             `xorm:"text NULL"`
+	CreatedUnix       timeutil.TimeStamp `xorm:"created"`
+	UpdatedUnix       timeutil.TimeStamp `xorm:"INDEX updated"`
+	IsCanOper         bool
+}
+
+type AiModelQueryOptions struct {
+	ListOptions
+	RepoID   int64 // include all repos if empty
+	UserID   int64
+	ModelID  string
+	SortType string
+	New      int
+	// JobStatus CloudbrainStatus
+	Type int
+}
+
+func SaveModelToDb(model *AiModelManage) error {
+	sess := x.NewSession()
+	defer sess.Close()
+
+	re, err := sess.Insert(model)
+	if err != nil {
+		log.Info("insert error." + err.Error())
+		return err
+	}
+	log.Info("success to save db.re=" + fmt.Sprint((re)))
+	return nil
+}
+
+func QueryModelById(id string) (*AiModelManage, error) {
+	sess := x.NewSession()
+	defer sess.Close()
+	sess.Select("*").Table("ai_model_manage").
+ Where("id='" + id + "'") + aiModelManageList := make([]*AiModelManage, 0) + err := sess.Find(&aiModelManageList) + if err == nil { + if len(aiModelManageList) == 1 { + return aiModelManageList[0], nil + } + } + return nil, err +} + +func DeleteModelById(id string) error { + sess := x.NewSession() + defer sess.Close() + + re, err := sess.Delete(&AiModelManage{ + ID: id, + }) + if err != nil { + return err + } + log.Info("success to delete from db.re=" + fmt.Sprint((re))) + return nil + +} + +func ModifyModelDescription(id string, description string) error { + var sess *xorm.Session + sess = x.ID(id) + defer sess.Close() + re, err := sess.Cols("description").Update(&AiModelManage{ + Description: description, + }) + if err != nil { + return err + } + log.Info("success to update description from db.re=" + fmt.Sprint((re))) + return nil +} + +func ModifyModelNewProperty(id string, new int, versioncount int) error { + var sess *xorm.Session + sess = x.ID(id) + defer sess.Close() + re, err := sess.Cols("new", "version_count").Update(&AiModelManage{ + New: new, + VersionCount: versioncount, + }) + if err != nil { + return err + } + log.Info("success to update new property from db.re=" + fmt.Sprint((re))) + return nil +} + +func ModifyModelDownloadCount(id string) error { + sess := x.NewSession() + defer sess.Close() + if _, err := sess.Exec("UPDATE `ai_model_manage` SET download_count = download_count + 1 WHERE id = ?", id); err != nil { + return err + } + + return nil +} + +func QueryModelByName(name string, repoId int64) []*AiModelManage { + sess := x.NewSession() + defer sess.Close() + sess.Select("*").Table("ai_model_manage"). + Where("name='" + name + "' and repo_id=" + fmt.Sprint(repoId)).OrderBy("version desc") + aiModelManageList := make([]*AiModelManage, 0) + sess.Find(&aiModelManageList) + return aiModelManageList +} + +func QueryModel(opts *AiModelQueryOptions) ([]*AiModelManage, int64, error) { + sess := x.NewSession() + defer sess.Close() + + var cond = builder.NewCond() + if opts.RepoID > 0 { + cond = cond.And( + builder.Eq{"ai_model_manage.repo_id": opts.RepoID}, + ) + } + + if opts.UserID > 0 { + cond = cond.And( + builder.Eq{"ai_model_manage.user_id": opts.UserID}, + ) + } + + if opts.New >= 0 { + cond = cond.And( + builder.Eq{"ai_model_manage.new": opts.New}, + ) + } + + if len(opts.ModelID) > 0 { + cond = cond.And( + builder.Eq{"ai_model_manage.id": opts.ModelID}, + ) + } + + if (opts.Type) >= 0 { + cond = cond.And( + builder.Eq{"ai_model_manage.type": opts.Type}, + ) + } + + count, err := sess.Where(cond).Count(new(AiModelManage)) + if err != nil { + return nil, 0, fmt.Errorf("Count: %v", err) + } + + if opts.Page >= 0 && opts.PageSize > 0 { + var start int + if opts.Page == 0 { + start = 0 + } else { + start = (opts.Page - 1) * opts.PageSize + } + sess.Limit(opts.PageSize, start) + } + + sess.OrderBy("ai_model_manage.created_unix DESC") + aiModelManages := make([]*AiModelManage, 0, setting.UI.IssuePagingNum) + if err := sess.Table("ai_model_manage").Where(cond). 
+ Find(&aiModelManages); err != nil { + return nil, 0, fmt.Errorf("Find: %v", err) + } + sess.Close() + + return aiModelManages, count, nil +} diff --git a/models/cloudbrain.go b/models/cloudbrain.go index dd3d3531c..ceb552811 100755 --- a/models/cloudbrain.go +++ b/models/cloudbrain.go @@ -91,6 +91,7 @@ type Cloudbrain struct { DeletedAt time.Time `xorm:"deleted"` CanDebug bool `xorm:"-"` CanDel bool `xorm:"-"` + CanModify bool `xorm:"-"` Type int VersionID int64 //版本id @@ -928,6 +929,48 @@ func Cloudbrains(opts *CloudbrainsOptions) ([]*CloudbrainInfo, int64, error) { return cloudbrains, count, nil } +func QueryModelTrainJobVersionList(jobId string) ([]*CloudbrainInfo, int, error) { + sess := x.NewSession() + defer sess.Close() + + var cond = builder.NewCond() + + cond = cond.And( + builder.Eq{"cloudbrain.job_id": jobId}, + ) + cond = cond.And( + builder.Eq{"cloudbrain.Status": "COMPLETED"}, + ) + + sess.OrderBy("cloudbrain.created_unix DESC") + cloudbrains := make([]*CloudbrainInfo, 0) + if err := sess.Table(&Cloudbrain{}).Where(cond). + Find(&cloudbrains); err != nil { + return nil, 0, fmt.Errorf("Find: %v", err) + } + + return cloudbrains, int(len(cloudbrains)), nil +} + +func QueryModelTrainJobList(repoId int64) ([]*CloudbrainInfo, int, error) { + sess := x.NewSession() + defer sess.Close() + var cond = builder.NewCond() + cond = cond.And( + builder.Eq{"repo_id": repoId}, + ) + cond = cond.And( + builder.Eq{"Status": "COMPLETED"}, + ) + sess.OrderBy("job_id DESC") + cloudbrains := make([]*CloudbrainInfo, 0) + if err := sess.Distinct("job_id,job_name").Table(&Cloudbrain{}).Where(cond). + Find(&cloudbrains); err != nil { + return nil, 0, fmt.Errorf("Find: %v", err) + } + return cloudbrains, int(len(cloudbrains)), nil +} + func CloudbrainsVersionList(opts *CloudbrainsOptions) ([]*CloudbrainInfo, int, error) { sess := x.NewSession() defer sess.Close() @@ -1032,13 +1075,13 @@ func GetCloudbrainByJobIDAndIsLatestVersion(jobID string, isLatestVersion string func GetCloudbrainsNeededStopByUserID(userID int64) ([]*Cloudbrain, error) { cloudBrains := make([]*Cloudbrain, 0) - err := x.Cols("job_id", "status", "type").Where("user_id=? AND status !=?", userID, string(JobStopped)).Find(&cloudBrains) + err := x.Cols("job_id", "status", "type", "job_type", "version_id").Where("user_id=? AND status !=?", userID, string(JobStopped)).Find(&cloudBrains) return cloudBrains, err } func GetCloudbrainsNeededStopByRepoID(repoID int64) ([]*Cloudbrain, error) { cloudBrains := make([]*Cloudbrain, 0) - err := x.Cols("job_id", "status", "type").Where("repo_id=? AND status !=?", repoID, string(JobStopped)).Find(&cloudBrains) + err := x.Cols("job_id", "status", "type", "job_type", "version_id").Where("repo_id=? AND status !=?", repoID, string(JobStopped)).Find(&cloudBrains) return cloudBrains, err } @@ -1067,7 +1110,8 @@ func UpdateJob(job *Cloudbrain) error { func updateJob(e Engine, job *Cloudbrain) error { var sess *xorm.Session sess = e.Where("job_id = ?", job.JobID) - _, err := sess.Cols("status", "container_id", "container_ip").Update(job) + //_, err := sess.Cols("status", "container_id", "container_ip").Update(job) + _, err := sess.Update(job) return err } @@ -1127,3 +1171,20 @@ func GetCloudBrainUnStoppedJob() ([]*Cloudbrain, error) { Limit(100). Find(&cloudbrains) } + +func GetCloudbrainCountByUserID(userID int64) (int, error) { + count, err := x.In("status", JobWaiting, JobRunning).And("job_type = ? and user_id = ? 
and type = ?", JobTypeDebug, userID, TypeCloudBrainOne).Count(new(Cloudbrain)) + return int(count), err +} + +func GetCloudbrainNotebookCountByUserID(userID int64) (int, error) { + count, err := x.In("status", ModelArtsCreateQueue, ModelArtsCreating, ModelArtsStarting, ModelArtsReadyToStart, ModelArtsResizing, ModelArtsStartQueuing, ModelArtsRunning, ModelArtsRestarting). + And("job_type = ? and user_id = ? and type = ?", JobTypeDebug, userID, TypeCloudBrainTwo).Count(new(Cloudbrain)) + return int(count), err +} + +func GetCloudbrainTrainJobCountByUserID(userID int64) (int, error) { + count, err := x.In("status", ModelArtsTrainJobInit, ModelArtsTrainJobImageCreating, ModelArtsTrainJobSubmitTrying, ModelArtsTrainJobWaiting, ModelArtsTrainJobRunning, ModelArtsTrainJobScaling, ModelArtsTrainJobCheckInit, ModelArtsTrainJobCheckRunning, ModelArtsTrainJobCheckRunningCompleted). + And("job_type = ? and user_id = ? and type = ?", JobTypeTrain, userID, TypeCloudBrainTwo).Count(new(Cloudbrain)) + return int(count), err +} diff --git a/models/dataset.go b/models/dataset.go index 402a548ef..2b3de752b 100755 --- a/models/dataset.go +++ b/models/dataset.go @@ -93,6 +93,7 @@ type SearchDatasetOptions struct { IncludePublic bool ListOptions SearchOrderBy + IsOwner bool } func CreateDataset(dataset *Dataset) (err error) { @@ -150,6 +151,9 @@ func SearchDatasetCondition(opts *SearchDatasetOptions) builder.Cond { } } else if opts.OwnerID > 0 { cond = cond.And(builder.Eq{"repository.owner_id": opts.OwnerID}) + if !opts.IsOwner { + cond = cond.And(builder.Eq{"dataset.status": DatasetStatusPublic}) + } } return cond diff --git a/models/models.go b/models/models.go index 809f9b0ca..e8a71bbd8 100755 --- a/models/models.go +++ b/models/models.go @@ -133,6 +133,7 @@ func init() { new(FileChunk), new(BlockChain), new(RecommendOrg), + new(AiModelManage), ) tablesStatistic = append(tablesStatistic, diff --git a/models/repo.go b/models/repo.go index 6d73aa28a..7948346c5 100755 --- a/models/repo.go +++ b/models/repo.go @@ -1114,6 +1114,12 @@ func CreateRepository(ctx DBContext, doer, u *User, repo *Repository) (err error Type: tp, Config: &BlockChainConfig{EnableBlockChain: true}, }) + } else if tp == UnitTypeModelManage { + units = append(units, RepoUnit{ + RepoID: repo.ID, + Type: tp, + Config: &ModelManageConfig{EnableModelManage: true}, + }) } else { units = append(units, RepoUnit{ RepoID: repo.ID, diff --git a/models/repo_unit.go b/models/repo_unit.go index 518c4b979..5f118029f 100755 --- a/models/repo_unit.go +++ b/models/repo_unit.go @@ -131,6 +131,20 @@ type CloudBrainConfig struct { EnableCloudBrain bool } +type ModelManageConfig struct { + EnableModelManage bool +} + +// FromDB fills up a CloudBrainConfig from serialized format. +func (cfg *ModelManageConfig) FromDB(bs []byte) error { + return json.Unmarshal(bs, &cfg) +} + +// ToDB exports a CloudBrainConfig to a serialized format. +func (cfg *ModelManageConfig) ToDB() ([]byte, error) { + return json.Marshal(cfg) +} + // FromDB fills up a CloudBrainConfig from serialized format. 
func (cfg *CloudBrainConfig) FromDB(bs []byte) error { return json.Unmarshal(bs, &cfg) @@ -176,6 +190,8 @@ func (r *RepoUnit) BeforeSet(colName string, val xorm.Cell) { r.Config = new(CloudBrainConfig) case UnitTypeBlockChain: r.Config = new(BlockChainConfig) + case UnitTypeModelManage: + r.Config = new(ModelManageConfig) default: panic("unrecognized repo unit type: " + com.ToStr(*val)) } diff --git a/models/unit.go b/models/unit.go index e2b73841a..381491388 100755 --- a/models/unit.go +++ b/models/unit.go @@ -27,6 +27,7 @@ const ( UnitTypeDatasets UnitType = 10 // 10 Dataset UnitTypeCloudBrain UnitType = 11 // 11 CloudBrain UnitTypeBlockChain UnitType = 12 // 12 BlockChain + UnitTypeModelManage UnitType = 13 // 13 ModelManage ) // Value returns integer value for unit type @@ -56,6 +57,8 @@ func (u UnitType) String() string { return "UnitTypeCloudBrain" case UnitTypeBlockChain: return "UnitTypeBlockChain" + case UnitTypeModelManage: + return "UnitTypeModelManage" } return fmt.Sprintf("Unknown UnitType %d", u) } @@ -80,6 +83,7 @@ var ( UnitTypeDatasets, UnitTypeCloudBrain, UnitTypeBlockChain, + UnitTypeModelManage, } // DefaultRepoUnits contains the default unit types @@ -92,6 +96,7 @@ var ( UnitTypeDatasets, UnitTypeCloudBrain, UnitTypeBlockChain, + UnitTypeModelManage, } // NotAllowedDefaultRepoUnits contains units that can't be default @@ -281,6 +286,14 @@ var ( 7, } + UnitModelManage = Unit{ + UnitTypeModelManage, + "repo.modelmanage", + "/modelmanage", + "repo.modelmanage.desc", + 8, + } + // Units contains all the units Units = map[UnitType]Unit{ UnitTypeCode: UnitCode, @@ -293,6 +306,7 @@ var ( UnitTypeDatasets: UnitDataset, UnitTypeCloudBrain: UnitCloudBrain, UnitTypeBlockChain: UnitBlockChain, + UnitTypeModelManage: UnitModelManage, } ) diff --git a/models/user.go b/models/user.go index 8968f7c02..b362472e8 100755 --- a/models/user.go +++ b/models/user.go @@ -145,7 +145,7 @@ type User struct { AllowImportLocal bool // Allow migrate repository by local path AllowCreateOrganization bool `xorm:"DEFAULT true"` ProhibitLogin bool `xorm:"NOT NULL DEFAULT false"` - IsOperator bool `xorm:"NOT NULL DEFAULT false"` //运营人员 + IsOperator bool `xorm:"NOT NULL DEFAULT false"` //运营人员 // Avatar Avatar string `xorm:"VARCHAR(2048) NOT NULL"` @@ -929,8 +929,17 @@ var ( "template", "user", "vendor", - } - reservedUserPatterns = []string{"*.keys", "*.gpg"} + "dashboard", + "operation", + "blockchain", + "avatar", + "swagger.v1.json", + "secure", + "serviceworker.js", + "self", + "repo-avatars", + } + reservedUserPatterns = []string{"*.keys", "*.gpg", "*.png"} ) // isUsableName checks if name is reserved or pattern of name is not allowed @@ -1552,11 +1561,11 @@ func GetUserByActivateEmail(email string) (*User, error) { if err := ctx.e.Join("INNER", "email_address", "email_address.uid = \"user\".id"). Where("email_address.email= ?", email). Find(&users); err != nil { - return nil,err + return nil, err } if len(users) >= 1 { - return &users[0],nil - }else { + return &users[0], nil + } else { // Finally, if email address is the protected email address:用户邮件地址设置为隐藏电子邮件地址 if strings.HasSuffix(email, fmt.Sprintf("@%s", setting.Service.NoReplyAddress)) { username := strings.TrimSuffix(email, fmt.Sprintf("@%s", setting.Service.NoReplyAddress)) @@ -1572,6 +1581,7 @@ func GetUserByActivateEmail(email string) (*User, error) { return nil, errors.New("cannot find user by email") } } + // GetUserByEmail returns the user object by given e-mail if exists. 
func GetUserByEmail(email string) (*User, error) { return GetUserByEmailContext(DefaultDBContext(), email) diff --git a/modules/auth/repo_form.go b/modules/auth/repo_form.go index 8352026fe..8061c6469 100755 --- a/modules/auth/repo_form.go +++ b/modules/auth/repo_form.go @@ -122,6 +122,7 @@ type RepoSettingForm struct { // Advanced settings EnableDataset bool EnableCloudBrain bool + EnableModelManager bool EnableWiki bool EnableExternalWiki bool ExternalWikiURL string diff --git a/modules/cloudbrain/cloudbrain.go b/modules/cloudbrain/cloudbrain.go index 8f6bf4e17..0f1c700d2 100755 --- a/modules/cloudbrain/cloudbrain.go +++ b/modules/cloudbrain/cloudbrain.go @@ -1,8 +1,10 @@ package cloudbrain import ( - "code.gitea.io/gitea/modules/setting" "errors" + "strconv" + + "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/models" "code.gitea.io/gitea/modules/context" @@ -16,7 +18,7 @@ const ( ModelMountPath = "/model" BenchMarkMountPath = "/benchmark" Snn4imagenetMountPath = "/snn4imagenet" - BrainScoreMountPath = "/brainscore" + BrainScoreMountPath = "/brainscore" TaskInfoName = "/taskInfo" SubTaskName = "task1" @@ -28,6 +30,75 @@ var ( ResourceSpecs *models.ResourceSpecs ) +func isAdminOrOwnerOrJobCreater(ctx *context.Context, job *models.Cloudbrain, err error) bool { + if !ctx.IsSigned { + return false + } + log.Info("is repo owner:" + strconv.FormatBool(ctx.IsUserRepoOwner())) + log.Info("is user admin:" + strconv.FormatBool(ctx.IsUserSiteAdmin())) + if err != nil { + + return ctx.IsUserRepoOwner() || ctx.IsUserSiteAdmin() + } else { + log.Info("is job creator:" + strconv.FormatBool(ctx.User.ID == job.UserID)) + return ctx.IsUserRepoOwner() || ctx.IsUserSiteAdmin() || ctx.User.ID == job.UserID + } + +} + +func CanDeleteJob(ctx *context.Context, job *models.Cloudbrain) bool { + + return isAdminOrOwnerOrJobCreater(ctx, job, nil) +} + +func CanCreateOrDebugJob(ctx *context.Context) bool { + if !ctx.IsSigned { + return false + } + return ctx.Repo.CanWrite(models.UnitTypeCloudBrain) +} + +func CanModifyJob(ctx *context.Context, job *models.Cloudbrain) bool { + + return isAdminOrJobCreater(ctx, job, nil) +} + +func isAdminOrJobCreater(ctx *context.Context, job *models.Cloudbrain, err error) bool { + if !ctx.IsSigned { + return false + } + if err != nil { + return ctx.IsUserSiteAdmin() + } else { + return ctx.IsUserSiteAdmin() || ctx.User.ID == job.UserID + } + +} + +func AdminOrOwnerOrJobCreaterRight(ctx *context.Context) { + + var jobID = ctx.Params(":jobid") + + job, err := models.GetCloudbrainByJobID(jobID) + + if !isAdminOrOwnerOrJobCreater(ctx, job, err) { + + ctx.NotFound(ctx.Req.URL.RequestURI(), nil) + } + +} + +func AdminOrJobCreaterRight(ctx *context.Context) { + + var jobID = ctx.Params(":jobid") + job, err := models.GetCloudbrainByJobID(jobID) + if !isAdminOrJobCreater(ctx, job, err) { + + ctx.NotFound(ctx.Req.URL.RequestURI(), nil) + } + +} + func GenerateTask(ctx *context.Context, jobName, image, command, uuid, codePath, modelPath, benchmarkPath, snn4imagenetPath, brainScorePath, jobType, gpuQueue string, resourceSpecId int) error { dataActualPath := setting.Attachment.Minio.RealPath + setting.Attachment.Minio.Bucket + "/" + @@ -46,7 +117,7 @@ func GenerateTask(ctx *context.Context, jobName, image, command, uuid, codePath, log.Error("no such resourceSpecId(%d)", resourceSpecId, ctx.Data["MsgID"]) return errors.New("no such resourceSpec") } - + jobResult, err := CreateJob(jobName, models.CreateJobParams{ JobName: jobName, RetryCount: 1, @@ -131,8 +202,8 @@ func GenerateTask(ctx 
*context.Context, jobName, image, command, uuid, codePath, JobName: jobName, SubTaskName: SubTaskName, JobType: jobType, - Type: models.TypeCloudBrainOne, - Uuid: uuid, + Type: models.TypeCloudBrainOne, + Uuid: uuid, }) if err != nil { diff --git a/modules/context/repo.go b/modules/context/repo.go index c6e5e8edd..de494c1bc 100755 --- a/modules/context/repo.go +++ b/modules/context/repo.go @@ -524,7 +524,7 @@ func RepoAssignment() macaron.Handler { } ctx.Data["Tags"] = tags - brs, err := ctx.Repo.GitRepo.GetBranches() + brs, _, err := ctx.Repo.GitRepo.GetBranches(0, 0) if err != nil { ctx.ServerError("GetBranches", err) return @@ -712,7 +712,7 @@ func RepoRefByType(refType RepoRefType) macaron.Handler { refName = ctx.Repo.Repository.DefaultBranch ctx.Repo.BranchName = refName if !ctx.Repo.GitRepo.IsBranchExist(refName) { - brs, err := ctx.Repo.GitRepo.GetBranches() + brs, _, err := ctx.Repo.GitRepo.GetBranches(0, 0) if err != nil { ctx.ServerError("GetBranches", err) return @@ -821,5 +821,6 @@ func UnitTypes() macaron.Handler { ctx.Data["UnitTypeExternalWiki"] = models.UnitTypeExternalWiki ctx.Data["UnitTypeExternalTracker"] = models.UnitTypeExternalTracker ctx.Data["UnitTypeBlockChain"] = models.UnitTypeBlockChain + ctx.Data["UnitTypeModelManage"] = models.UnitTypeModelManage } } diff --git a/modules/cron/tasks_basic.go b/modules/cron/tasks_basic.go index 294690d45..b9838e66f 100755 --- a/modules/cron/tasks_basic.go +++ b/modules/cron/tasks_basic.go @@ -134,7 +134,7 @@ func registerHandleBlockChainUnSuccessRepos() { RegisterTaskFatal("handle_blockchain_unsuccess_repos", &BaseConfig{ Enabled: true, RunAtStart: true, - Schedule: "@every 1m", + Schedule: "@every 10m", }, func(ctx context.Context, _ *models.User, _ Config) error { repo.HandleBlockChainUnSuccessRepos() return nil @@ -145,7 +145,7 @@ func registerHandleBlockChainMergedPulls() { RegisterTaskFatal("handle_blockchain_merged_pull", &BaseConfig{ Enabled: true, RunAtStart: true, - Schedule: "@every 1m", + Schedule: "@every 10m", }, func(ctx context.Context, _ *models.User, _ Config) error { repo.HandleBlockChainMergedPulls() return nil @@ -156,7 +156,7 @@ func registerHandleBlockChainUnSuccessCommits() { RegisterTaskFatal("handle_blockchain_unsuccess_commits", &BaseConfig{ Enabled: true, RunAtStart: true, - Schedule: "@every 3m", + Schedule: "@every 10m", }, func(ctx context.Context, _ *models.User, _ Config) error { repo.HandleBlockChainUnSuccessCommits() return nil diff --git a/modules/git/repo_branch.go b/modules/git/repo_branch.go old mode 100644 new mode 100755 index 8f9c802e0..ad5acfdbf --- a/modules/git/repo_branch.go +++ b/modules/git/repo_branch.go @@ -6,7 +6,9 @@ package git import ( + "bufio" "fmt" + "io" "strings" "github.com/go-git/go-git/v5/plumbing" @@ -74,25 +76,6 @@ func (repo *Repository) SetDefaultBranch(name string) error { return err } -// GetBranches returns all branches of the repository. -func (repo *Repository) GetBranches() ([]string, error) { - var branchNames []string - - branches, err := repo.gogitRepo.Branches() - if err != nil { - return nil, err - } - - _ = branches.ForEach(func(branch *plumbing.Reference) error { - branchNames = append(branchNames, strings.TrimPrefix(branch.Name().String(), BranchPrefix)) - return nil - }) - - // TODO: Sort? 
- - return branchNames, nil -} - // GetBranch returns a branch by it's name func (repo *Repository) GetBranch(branch string) (*Branch, error) { if !repo.IsBranchExist(branch) { @@ -106,16 +89,16 @@ func (repo *Repository) GetBranch(branch string) (*Branch, error) { } // GetBranchesByPath returns a branch by it's path -func GetBranchesByPath(path string) ([]*Branch, error) { +func GetBranchesByPath(path string, skip, limit int) ([]*Branch, int, error) { gitRepo, err := OpenRepository(path) if err != nil { - return nil, err + return nil, 0, err } defer gitRepo.Close() - brs, err := gitRepo.GetBranches() + brs, countAll, err := gitRepo.GetBranches(skip, limit) if err != nil { - return nil, err + return nil, 0, err } branches := make([]*Branch, len(brs)) @@ -127,7 +110,7 @@ func GetBranchesByPath(path string) ([]*Branch, error) { } } - return branches, nil + return branches, countAll, nil } // DeleteBranchOptions Option(s) for delete branch @@ -183,3 +166,91 @@ func (repo *Repository) RemoveRemote(name string) error { func (branch *Branch) GetCommit() (*Commit, error) { return branch.gitRepo.GetBranchCommit(branch.Name) } + +// GetBranches returns branches from the repository, skipping skip initial branches and +// returning at most limit branches, or all branches if limit is 0. +func (repo *Repository) GetBranches(skip, limit int) ([]string, int, error) { + return callShowRef(repo.Path, BranchPrefix, "--heads", skip, limit) +} + +// callShowRef return refs, if limit = 0 it will not limit +func callShowRef(repoPath, prefix, arg string, skip, limit int) (branchNames []string, countAll int, err error) { + stdoutReader, stdoutWriter := io.Pipe() + defer func() { + _ = stdoutReader.Close() + _ = stdoutWriter.Close() + }() + + go func() { + stderrBuilder := &strings.Builder{} + err := NewCommand("show-ref", arg).RunInDirPipeline(repoPath, stdoutWriter, stderrBuilder) + if err != nil { + if stderrBuilder.Len() == 0 { + _ = stdoutWriter.Close() + return + } + _ = stdoutWriter.CloseWithError(ConcatenateError(err, stderrBuilder.String())) + } else { + _ = stdoutWriter.Close() + } + }() + + i := 0 + bufReader := bufio.NewReader(stdoutReader) + for i < skip { + _, isPrefix, err := bufReader.ReadLine() + if err == io.EOF { + return branchNames, i, nil + } + if err != nil { + return nil, 0, err + } + if !isPrefix { + i++ + } + } + for limit == 0 || i < skip+limit { + // The output of show-ref is simply a list: + // SP LF + _, err := bufReader.ReadSlice(' ') + for err == bufio.ErrBufferFull { + // This shouldn't happen but we'll tolerate it for the sake of peace + _, err = bufReader.ReadSlice(' ') + } + if err == io.EOF { + return branchNames, i, nil + } + if err != nil { + return nil, 0, err + } + + branchName, err := bufReader.ReadString('\n') + if err == io.EOF { + // This shouldn't happen... 
but we'll tolerate it for the sake of peace + return branchNames, i, nil + } + if err != nil { + return nil, i, err + } + branchName = strings.TrimPrefix(branchName, prefix) + if len(branchName) > 0 { + branchName = branchName[:len(branchName)-1] + } + branchNames = append(branchNames, branchName) + i++ + } + // count all refs + for limit != 0 { + _, isPrefix, err := bufReader.ReadLine() + if err == io.EOF { + return branchNames, i, nil + } + if err != nil { + return nil, 0, err + } + if !isPrefix { + i++ + } + } + return branchNames, i, nil +} diff --git a/modules/git/repo_tag.go b/modules/git/repo_tag.go old mode 100644 new mode 100755 index 7780e3477..08f6f5ec1 --- a/modules/git/repo_tag.go +++ b/modules/git/repo_tag.go @@ -10,7 +10,6 @@ import ( "strings" "github.com/go-git/go-git/v5/plumbing" - "github.com/mcuadros/go-version" ) // TagPrefix tags prefix path on the repository @@ -225,29 +224,35 @@ func (repo *Repository) GetTagInfos(page, pageSize int) ([]*Tag, error) { return tags, nil } -// GetTags returns all tags of the repository. -func (repo *Repository) GetTags() ([]string, error) { - var tagNames []string - - tags, err := repo.gogitRepo.Tags() - if err != nil { - return nil, err - } +//// GetTags returns all tags of the repository. +//func (repo *Repository) GetTags() ([]string, error) { +// var tagNames []string +// +// tags, err := repo.gogitRepo.Tags() +// if err != nil { +// return nil, err +// } +// +// _ = tags.ForEach(func(tag *plumbing.Reference) error { +// tagNames = append(tagNames, strings.TrimPrefix(tag.Name().String(), TagPrefix)) +// return nil +// }) +// +// version.Sort(tagNames) +// +// // Reverse order +// for i := 0; i < len(tagNames)/2; i++ { +// j := len(tagNames) - i - 1 +// tagNames[i], tagNames[j] = tagNames[j], tagNames[i] +// } +// +// return tagNames, nil +//} - _ = tags.ForEach(func(tag *plumbing.Reference) error { - tagNames = append(tagNames, strings.TrimPrefix(tag.Name().String(), TagPrefix)) - return nil - }) - - version.Sort(tagNames) - - // Reverse order - for i := 0; i < len(tagNames)/2; i++ { - j := len(tagNames) - i - 1 - tagNames[i], tagNames[j] = tagNames[j], tagNames[i] - } - - return tagNames, nil +// GetTags returns all tags of the repository. 
+func (repo *Repository) GetTags() (tags []string, err error) { + tags, _, err = callShowRef(repo.Path, TagPrefix, "--tags", 0, 0) + return } // GetTagType gets the type of the tag, either commit (simple) or tag (annotated) diff --git a/modules/git/utils.go b/modules/git/utils.go old mode 100644 new mode 100755 index 83209924c..0d044feda --- a/modules/git/utils.go +++ b/modules/git/utils.go @@ -140,3 +140,11 @@ func ParseBool(value string) (result bool, valid bool) { } return intValue != 0, true } + +// ConcatenateError concatenats an error with stderr string +func ConcatenateError(err error, stderr string) error { + if len(stderr) == 0 { + return err + } + return fmt.Errorf("%w - %s", err, stderr) +} diff --git a/modules/normalization/normalization.go b/modules/normalization/normalization.go index ce616d7f8..a258a13a7 100644 --- a/modules/normalization/normalization.go +++ b/modules/normalization/normalization.go @@ -4,6 +4,8 @@ import ( "code.gitea.io/gitea/modules/setting" ) +const MAX_LINES_RECORD = 100 + func Normalization(value float64, minValue float64, maxValue float64) float64 { min := int64(minValue * 100) @@ -72,9 +74,12 @@ func GetTeamHealthInitValue(contributors int64, keyContributors int64, newContri } -func GetRepoGrowthInitValue(codelinesGrowth int64, issueGrowth int64, commitsGrowth int64, newContributors int64, commentsGrowth int64) float64 { - - return setting.RadarMap.GrowthCodeLines*float64(codelinesGrowth) + +func GetRepoGrowthInitValue(codeLinesGrowth int64, issueGrowth int64, commitsGrowth int64, newContributors int64, commentsGrowth int64) float64 { + codeLinesKB := codeLinesGrowth / 1000 + if codeLinesKB > MAX_LINES_RECORD { + codeLinesKB = MAX_LINES_RECORD + } + return setting.RadarMap.GrowthCodeLines*float64(codeLinesKB) + setting.RadarMap.GrowthIssue*float64(issueGrowth) + setting.RadarMap.GrowthCommit*float64(commitsGrowth) + setting.RadarMap.GrowthContributors*float64(newContributors) + diff --git a/modules/repository/branch.go b/modules/repository/branch.go old mode 100644 new mode 100755 index 418ba25c8..7b203dd91 --- a/modules/repository/branch.go +++ b/modules/repository/branch.go @@ -23,9 +23,10 @@ func GetBranch(repo *models.Repository, branch string) (*git.Branch, error) { return gitRepo.GetBranch(branch) } -// GetBranches returns all the branches of a repository -func GetBranches(repo *models.Repository) ([]*git.Branch, error) { - return git.GetBranchesByPath(repo.RepoPath()) +// GetBranches returns branches from the repository, skipping skip initial branches and +// returning at most limit branches, or all branches if limit is 0. 
+func GetBranches(repo *models.Repository, skip, limit int) ([]*git.Branch, int, error) { + return git.GetBranchesByPath(repo.RepoPath(), skip, limit) } // checkBranchName validates branch name with existing repository branches @@ -36,7 +37,7 @@ func checkBranchName(repo *models.Repository, name string) error { } defer gitRepo.Close() - branches, err := GetBranches(repo) + branches, _, err := GetBranches(repo, 0, 0) if err != nil { return err } diff --git a/modules/setting/setting.go b/modules/setting/setting.go index 92eae63b0..dd51623c1 100755 --- a/modules/setting/setting.go +++ b/modules/setting/setting.go @@ -439,29 +439,34 @@ var ( DecompressOBSTaskName string //cloudbrain config - CBAuthUser string - CBAuthPassword string - RestServerHost string - JobPath string - JobType string - GpuTypes string - DebugServerHost string - ResourceSpecs string + CBAuthUser string + CBAuthPassword string + RestServerHost string + JobPath string + CBCodePathPrefix string + JobType string + GpuTypes string + DebugServerHost string + ResourceSpecs string + MaxDuration int64 //benchmark config IsBenchmarkEnabled bool - BenchmarkCode string + BenchmarkOwner string + BenchmarkName string BenchmarkServerHost string BenchmarkCategory string //snn4imagenet config IsSnn4imagenetEnabled bool - Snn4imagenetCode string + Snn4imagenetOwner string + Snn4imagenetName string Snn4imagenetServerHost string //snn4imagenet config IsBrainScoreEnabled bool - BrainScoreCode string + BrainScoreOwner string + BrainScoreName string BrainScoreServerHost string //blockchain config @@ -549,7 +554,7 @@ var ( RecordBeginTime string IgnoreMirrorRepo bool }{} - + Warn_Notify_Mails []string ) @@ -1216,8 +1221,8 @@ func NewContext() { sec = Cfg.Section("decompress") DecompressAddress = sec.Key("HOST").MustString("http://192.168.207.34:39987") - AuthUser = sec.Key("USER").MustString("cW4cMtH24eoWPE7X") - AuthPassword = sec.Key("PASSWORD").MustString("4BPmgvK2hb2Eywwyp4YZRY4B7yQf4DAC") + AuthUser = sec.Key("USER").MustString("") + AuthPassword = sec.Key("PASSWORD").MustString("") sec = Cfg.Section("labelsystem") LabelTaskName = sec.Key("LabelTaskName").MustString("LabelRedisQueue") @@ -1225,42 +1230,47 @@ func NewContext() { DecompressOBSTaskName = sec.Key("DecompressOBSTaskName").MustString("LabelDecompressOBSQueue") sec = Cfg.Section("cloudbrain") - CBAuthUser = sec.Key("USER").MustString("cW4cMtH24eoWPE7X") - CBAuthPassword = sec.Key("PWD").MustString("4BPmgvK2hb2Eywwyp4YZRY4B7yQf4DAC") + CBAuthUser = sec.Key("USER").MustString("") + CBAuthPassword = sec.Key("PWD").MustString("") RestServerHost = sec.Key("REST_SERVER_HOST").MustString("http://192.168.202.73") JobPath = sec.Key("JOB_PATH").MustString("/datasets/minio/data/opendata/jobs/") + CBCodePathPrefix = sec.Key("CODE_PATH_PREFIX").MustString("jobs/") DebugServerHost = sec.Key("DEBUG_SERVER_HOST").MustString("http://192.168.202.73") JobType = sec.Key("GPU_TYPE_DEFAULT").MustString("openidebug") GpuTypes = sec.Key("GPU_TYPES").MustString("") ResourceSpecs = sec.Key("RESOURCE_SPECS").MustString("") + MaxDuration = sec.Key("MAX_DURATION").MustInt64(14400) sec = Cfg.Section("benchmark") IsBenchmarkEnabled = sec.Key("ENABLED").MustBool(false) - BenchmarkCode = sec.Key("BENCHMARKCODE").MustString("https://yangzhx:justfortest123@git.openi.org.cn/yangzhx/detection_benchmark_script.git") - BenchmarkServerHost = sec.Key("HOST").MustString("http://192.168.202.90:3366/") + BenchmarkOwner = sec.Key("OWNER").MustString("") + BenchmarkName = sec.Key("NAME").MustString("") + BenchmarkServerHost = 
sec.Key("HOST").MustString("") BenchmarkCategory = sec.Key("CATEGORY").MustString("") sec = Cfg.Section("snn4imagenet") IsSnn4imagenetEnabled = sec.Key("ENABLED").MustBool(false) - Snn4imagenetCode = sec.Key("SNN4IMAGENETCODE").MustString("https://yult:19910821ylt@git.openi.org.cn/yult/snn4imagenet_script.git") - Snn4imagenetServerHost = sec.Key("HOST").MustString("http://192.168.207.76:8080/") + Snn4imagenetOwner = sec.Key("OWNER").MustString("") + Snn4imagenetName = sec.Key("NAME").MustString("") + Snn4imagenetServerHost = sec.Key("HOST").MustString("") sec = Cfg.Section("brainscore") IsBrainScoreEnabled = sec.Key("ENABLED").MustBool(false) - BrainScoreCode = sec.Key("BRAINSCORECODE").MustString("https://yult:19910821ylt@git.openi.org.cn/yult/brainscore_script.git") - BrainScoreServerHost = sec.Key("HOST").MustString("http://192.168.207.76:8080/") + BrainScoreOwner = sec.Key("OWNER").MustString("") + BrainScoreName = sec.Key("NAME").MustString("") + BrainScoreServerHost = sec.Key("HOST").MustString("") sec = Cfg.Section("blockchain") BlockChainHost = sec.Key("HOST").MustString("http://192.168.136.66:3302/") CommitValidDate = sec.Key("COMMIT_VALID_DATE").MustString("2021-01-15") sec = Cfg.Section("obs") - Endpoint = sec.Key("ENDPOINT").MustString("112.95.163.82") + Endpoint = sec.Key("ENDPOINT").MustString("") AccessKeyID = sec.Key("ACCESS_KEY_ID").MustString("") SecretAccessKey = sec.Key("SECRET_ACCESS_KEY").MustString("") - Bucket = sec.Key("BUCKET").MustString("testopendata") - Location = sec.Key("LOCATION").MustString("cn-south-222") - BasePath = sec.Key("BASE_PATH").MustString("attachment/") + Bucket = sec.Key("BUCKET").MustString("") + Location = sec.Key("LOCATION").MustString("") + BasePath = sec.Key("BASE_PATH").MustString("") TrainJobModelPath = sec.Key("TrainJobModel_Path").MustString("job/") OutPutPath = sec.Key("Output_Path").MustString("output/") CodePathPrefix = sec.Key("CODE_PATH_PREFIX").MustString("code/") @@ -1268,17 +1278,17 @@ func NewContext() { PROXYURL = sec.Key("PROXY_URL").MustString("") sec = Cfg.Section("modelarts") - ModelArtsHost = sec.Key("ENDPOINT").MustString("112.95.163.80") - IamHost = sec.Key("IAMHOST").MustString("112.95.163.80") + ModelArtsHost = sec.Key("ENDPOINT").MustString("") + IamHost = sec.Key("IAMHOST").MustString("") ProjectID = sec.Key("PROJECT_ID").MustString("") ProjectName = sec.Key("PROJECT_NAME").MustString("") ModelArtsUsername = sec.Key("USERNAME").MustString("") ModelArtsPassword = sec.Key("PASSWORD").MustString("") - ModelArtsDomain = sec.Key("DOMAIN").MustString("cn-south-222") + ModelArtsDomain = sec.Key("DOMAIN").MustString("") AllowedOrg = sec.Key("ORGANIZATION").MustString("") ProfileID = sec.Key("PROFILE_ID").MustString("") PoolInfos = sec.Key("POOL_INFOS").MustString("") - Flavor = sec.Key("FLAVOR").MustString("modelarts.bm.910.arm.public.2") + Flavor = sec.Key("FLAVOR").MustString("") ResourcePools = sec.Key("Resource_Pools").MustString("") Engines = sec.Key("Engines").MustString("") EngineVersions = sec.Key("Engine_Versions").MustString("") @@ -1286,10 +1296,10 @@ func NewContext() { TrainJobFLAVORINFOS = sec.Key("TrainJob_FLAVOR_INFOS").MustString("") sec = Cfg.Section("elk") - ElkUrl = sec.Key("ELKURL").MustString("http://192.168.207.35:5601/internal/bsearch") - ElkUser = sec.Key("ELKUSER").MustString("Qizhi") - ElkPassword = sec.Key("ELKPASSWORD").MustString("Pcl2020") - Index = sec.Key("INDEX").MustString("filebeat-7.3.2*") + ElkUrl = sec.Key("ELKURL").MustString("") + ElkUser = sec.Key("ELKUSER").MustString("") + 
ElkPassword = sec.Key("ELKPASSWORD").MustString("") + Index = sec.Key("INDEX").MustString("") TimeField = sec.Key("TIMEFIELD").MustString(" @timestamptest") ElkTimeFormat = sec.Key("ELKTIMEFORMAT").MustString("date_time") @@ -1313,7 +1323,7 @@ func SetRadarMapConfig() { RadarMap.CompletenessIssuesClosed = sec.Key("completeness_issues_closed").MustFloat64(0.2) RadarMap.CompletenessReleases = sec.Key("completeness_releases").MustFloat64(0.3) RadarMap.CompletenessDevelopAge = sec.Key("completeness_develop_age").MustFloat64(0.1) - RadarMap.CompletenessDataset = sec.Key("completeness_dataset").MustFloat64(0.1) + RadarMap.CompletenessDataset = sec.Key("completeness_dataset").MustFloat64(0) RadarMap.CompletenessModel = sec.Key("completeness_model").MustFloat64(0.1) RadarMap.CompletenessWiki = sec.Key("completeness_wiki").MustFloat64(0.1) RadarMap.Liveness = sec.Key("liveness").MustFloat64(0.3) diff --git a/modules/storage/local.go b/modules/storage/local.go index c462dcd9e..d46a5528d 100644 --- a/modules/storage/local.go +++ b/modules/storage/local.go @@ -76,3 +76,7 @@ func (l *LocalStorage) PresignedPutURL(path string) (string, error) { func (l *LocalStorage) HasObject(path string) (bool, error) { return false, nil } + +func (l *LocalStorage) UploadObject(fileName, filePath string) error { + return nil +} diff --git a/modules/storage/minio.go b/modules/storage/minio.go index b14442d56..664e58d1b 100755 --- a/modules/storage/minio.go +++ b/modules/storage/minio.go @@ -122,3 +122,9 @@ func (m *MinioStorage) HasObject(path string) (bool, error) { return hasObject, nil } + +//upload object +func (m *MinioStorage) UploadObject(fileName, filePath string) error { + _, err := m.client.FPutObject(m.bucket, fileName, filePath, minio.PutObjectOptions{}) + return err +} diff --git a/modules/storage/obs.go b/modules/storage/obs.go index a5c463bb0..367ffe1e8 100755 --- a/modules/storage/obs.go +++ b/modules/storage/obs.go @@ -5,6 +5,7 @@ package storage import ( + "errors" "io" "net/url" "path" @@ -140,11 +141,51 @@ func ObsMultiPartUpload(uuid string, uploadId string, partNumber int, fileName s } -func ObsDownload(uuid string, fileName string) (io.ReadCloser, error) { +//delete all file under the dir path +func ObsRemoveObject(bucket string, path string) error { + log.Info("Bucket=" + bucket + " path=" + path) + if len(path) == 0 { + return errors.New("path canot be null.") + } + input := &obs.ListObjectsInput{} + input.Bucket = bucket + // 设置每页100个对象 + input.MaxKeys = 100 + input.Prefix = path + index := 1 + log.Info("prefix=" + input.Prefix) + for { + output, err := ObsCli.ListObjects(input) + if err == nil { + log.Info("Page:%d\n", index) + index++ + for _, val := range output.Contents { + log.Info("delete obs file:" + val.Key) + delObj := &obs.DeleteObjectInput{} + delObj.Bucket = setting.Bucket + delObj.Key = val.Key + ObsCli.DeleteObject(delObj) + } + if output.IsTruncated { + input.Marker = output.NextMarker + } else { + break + } + } else { + if obsError, ok := err.(obs.ObsError); ok { + log.Info("Code:%s\n", obsError.Code) + log.Info("Message:%s\n", obsError.Message) + } + return err + } + } + return nil +} + +func ObsDownloadAFile(bucket string, key string) (io.ReadCloser, error) { input := &obs.GetObjectInput{} - input.Bucket = setting.Bucket - input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/") - // input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/") + input.Bucket = bucket + 
input.Key = key output, err := ObsCli.GetObject(input) if err == nil { log.Info("StorageClass:%s, ETag:%s, ContentType:%s, ContentLength:%d, LastModified:%s\n", @@ -158,6 +199,11 @@ func ObsDownload(uuid string, fileName string) (io.ReadCloser, error) { } } +func ObsDownload(uuid string, fileName string) (io.ReadCloser, error) { + + return ObsDownloadAFile(setting.Bucket, strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/")) +} + func ObsModelDownload(JobName string, fileName string) (io.ReadCloser, error) { input := &obs.GetObjectInput{} input.Bucket = setting.Bucket @@ -176,6 +222,160 @@ func ObsModelDownload(JobName string, fileName string) (io.ReadCloser, error) { } } +func ObsCopyManyFile(srcBucket string, srcPath string, destBucket string, destPath string) (int64, error) { + input := &obs.ListObjectsInput{} + input.Bucket = srcBucket + // 设置每页100个对象 + input.MaxKeys = 100 + input.Prefix = srcPath + index := 1 + length := len(srcPath) + var fileTotalSize int64 + log.Info("prefix=" + input.Prefix) + for { + output, err := ObsCli.ListObjects(input) + if err == nil { + log.Info("Page:%d\n", index) + index++ + for _, val := range output.Contents { + destKey := destPath + val.Key[length:] + obsCopyFile(srcBucket, val.Key, destBucket, destKey) + fileTotalSize += val.Size + } + if output.IsTruncated { + input.Marker = output.NextMarker + } else { + break + } + } else { + if obsError, ok := err.(obs.ObsError); ok { + log.Info("Code:%s\n", obsError.Code) + log.Info("Message:%s\n", obsError.Message) + } + return 0, err + } + } + return fileTotalSize, nil +} + +func obsCopyFile(srcBucket string, srcKeyName string, destBucket string, destKeyName string) error { + input := &obs.CopyObjectInput{} + input.Bucket = destBucket + input.Key = destKeyName + input.CopySourceBucket = srcBucket + input.CopySourceKey = srcKeyName + _, err := ObsCli.CopyObject(input) + if err == nil { + log.Info("copy success,destBuckName:%s, destkeyname:%s", destBucket, destKeyName) + } else { + log.Info("copy failed,,destBuckName:%s, destkeyname:%s", destBucket, destKeyName) + if obsError, ok := err.(obs.ObsError); ok { + log.Info(obsError.Code) + log.Info(obsError.Message) + } + return err + } + return nil +} + +func GetOneLevelAllObjectUnderDir(bucket string, prefixRootPath string, relativePath string) ([]FileInfo, error) { + input := &obs.ListObjectsInput{} + input.Bucket = bucket + input.Prefix = prefixRootPath + relativePath + if !strings.HasSuffix(input.Prefix, "/") { + input.Prefix += "/" + } + output, err := ObsCli.ListObjects(input) + fileInfos := make([]FileInfo, 0) + prefixLen := len(input.Prefix) + if err == nil { + for _, val := range output.Contents { + log.Info("val key=" + val.Key) + var isDir bool + var fileName string + if val.Key == input.Prefix { + continue + } + if strings.Contains(val.Key[prefixLen:len(val.Key)-1], "/") { + continue + } + if strings.HasSuffix(val.Key, "/") { + isDir = true + fileName = val.Key[prefixLen : len(val.Key)-1] + relativePath += val.Key[prefixLen:] + } else { + isDir = false + fileName = val.Key[prefixLen:] + } + fileInfo := FileInfo{ + ModTime: val.LastModified.Local().Format("2006-01-02 15:04:05"), + FileName: fileName, + Size: val.Size, + IsDir: isDir, + ParenDir: relativePath, + } + fileInfos = append(fileInfos, fileInfo) + } + return fileInfos, err + } else { + if obsError, ok := err.(obs.ObsError); ok { + log.Error("Code:%s, Message:%s", obsError.Code, obsError.Message) + } + return nil, err + } + +} + +func 
GetAllObjectByBucketAndPrefix(bucket string, prefix string) ([]FileInfo, error) { + input := &obs.ListObjectsInput{} + input.Bucket = bucket + // 设置每页100个对象 + input.MaxKeys = 100 + input.Prefix = prefix + index := 1 + fileInfos := make([]FileInfo, 0) + prefixLen := len(prefix) + log.Info("prefix=" + input.Prefix) + for { + output, err := ObsCli.ListObjects(input) + if err == nil { + log.Info("Page:%d\n", index) + index++ + for _, val := range output.Contents { + var isDir bool + if prefixLen == len(val.Key) { + continue + } + if strings.HasSuffix(val.Key, "/") { + isDir = true + } else { + isDir = false + } + fileInfo := FileInfo{ + ModTime: val.LastModified.Format("2006-01-02 15:04:05"), + FileName: val.Key[prefixLen:], + Size: val.Size, + IsDir: isDir, + ParenDir: "", + } + fileInfos = append(fileInfos, fileInfo) + } + if output.IsTruncated { + input.Marker = output.NextMarker + } else { + break + } + } else { + if obsError, ok := err.(obs.ObsError); ok { + log.Info("Code:%s\n", obsError.Code) + log.Info("Message:%s\n", obsError.Message) + } + return nil, err + } + } + return fileInfos, nil +} + func GetObsListObject(jobName, parentDir, versionName string) ([]FileInfo, error) { input := &obs.ListObjectsInput{} input.Bucket = setting.Bucket @@ -258,27 +458,6 @@ func ObsGenMultiPartSignedUrl(uuid string, uploadId string, partNumber int, file return output.SignedUrl, nil } -func GetObsCreateSignedUrl(jobName, parentDir, fileName string) (string, error) { - input := &obs.CreateSignedUrlInput{} - input.Bucket = setting.Bucket - input.Key = strings.TrimPrefix(path.Join(setting.TrainJobModelPath, jobName, setting.OutPutPath, parentDir, fileName), "/") - - input.Expires = 60 * 60 - input.Method = obs.HttpMethodGet - - reqParams := make(map[string]string) - fileName = url.QueryEscape(fileName) - reqParams["response-content-disposition"] = "attachment; filename=\"" + fileName + "\"" - input.QueryParams = reqParams - output, err := ObsCli.CreateSignedUrl(input) - if err != nil { - log.Error("CreateSignedUrl failed:", err.Error()) - return "", err - } - log.Info("SignedUrl:%s", output.SignedUrl) - return output.SignedUrl, nil -} - func GetObsCreateSignedUrlByBucketAndKey(bucket, key string) (string, error) { input := &obs.CreateSignedUrlInput{} input.Bucket = bucket @@ -302,7 +481,10 @@ func GetObsCreateSignedUrlByBucketAndKey(bucket, key string) (string, error) { } return output.SignedUrl, nil +} +func GetObsCreateSignedUrl(jobName, parentDir, fileName string) (string, error) { + return GetObsCreateSignedUrlByBucketAndKey(setting.Bucket, strings.TrimPrefix(path.Join(setting.TrainJobModelPath, jobName, setting.OutPutPath, parentDir, fileName), "/")) } func ObsGetPreSignedUrl(uuid, fileName string) (string, error) { diff --git a/modules/storage/storage.go b/modules/storage/storage.go index 191871d93..d364346f0 100755 --- a/modules/storage/storage.go +++ b/modules/storage/storage.go @@ -26,6 +26,7 @@ type ObjectStorage interface { PresignedGetURL(path string, fileName string) (string, error) PresignedPutURL(path string) (string, error) HasObject(path string) (bool, error) + UploadObject(fileName, filePath string) error } // Copy copys a file from source ObjectStorage to dest ObjectStorage diff --git a/options/locale/locale_en-US.ini b/options/locale/locale_en-US.ini index 440147d58..3ab3a00c9 100644 --- a/options/locale/locale_en-US.ini +++ b/options/locale/locale_en-US.ini @@ -816,6 +816,11 @@ get_repo_info_error=Can not get the information of the repository. 
generate_statistic_file_error=Fail to generate file. repo_stat_inspect=ProjectAnalysis all=All +modelarts.status=Status +modelarts.createtime=CreateTime +modelarts.version_nums = Version Nums +modelarts.version = Version +modelarts.computing_resources=compute Resources modelarts.notebook=Debug Task modelarts.train_job=Train Task modelarts.train_job.new_debug= New Debug Task @@ -823,6 +828,10 @@ modelarts.train_job.new_train=New Train Task modelarts.train_job.config=Configuration information modelarts.train_job.new=New train Task modelarts.train_job.new_place=The description should not exceed 256 characters +modelarts.model_name=Model Name +modelarts.model_size=Model Size +modelarts.import_model=Import Model + modelarts.modify=Modify modelarts.current_version=Current version modelarts.parent_version=Parent Version @@ -874,6 +883,20 @@ modelarts.train_job_para_admin=train_job_para_admin modelarts.train_job_para.edit=train_job_para.edit modelarts.train_job_para.connfirm=train_job_para.connfirm +model.manage.import_new_model=Import New Model +model.manage.create_error=Equal Name and Version has existed. +model.manage.model_name = Model Name +model.manage.version = Version +model.manage.label = Label +model.manage.size = Size +model.manage.create_time = Create Time +model.manage.Description = Description +model.manage.Accuracy = Accuracy +model.manage.F1 = F1 +model.manage.Precision = Precision +model.manage.Recall = Recall + + template.items = Template Items template.git_content = Git Content (Default Branch) template.git_hooks = Git Hooks @@ -1552,6 +1575,7 @@ settings.external_wiki_url_error = The external wiki URL is not a valid URL. settings.external_wiki_url_desc = Visitors are redirected to the external wiki URL when clicking the wiki tab. settings.dataset_desc = Enable Repository Dataset settings.cloudbrain_desc = Enable Cloudbarin +settings.model_desc = Enable Model Manage settings.issues_desc = Enable Repository Issue Tracker settings.use_internal_issue_tracker = Use Built-In Issue Tracker settings.use_external_issue_tracker = Use External Issue Tracker diff --git a/options/locale/locale_zh-CN.ini b/options/locale/locale_zh-CN.ini index e26e07903..98d581fb7 100755 --- a/options/locale/locale_zh-CN.ini +++ b/options/locale/locale_zh-CN.ini @@ -228,6 +228,7 @@ users=用户 organizations=组织 images = 云脑镜像 search=搜索 +search_pro=搜项目 code=代码 data_analysis=数字看板(内测) repo_no_results=未找到匹配的项目。 @@ -781,6 +782,9 @@ datasets=数据集 datasets.desc=数据集功能 cloudbrain_helper=使用GPU/NPU资源,开启Notebook、模型训练任务等 +model_manager = 模型管理 +model_noright=无权限操作 + debug=调试 stop=停止 delete=删除 @@ -823,6 +827,7 @@ all=所有 modelarts.status=状态 modelarts.createtime=创建时间 modelarts.version_nums=版本数 +modelarts.version=版本 modelarts.computing_resources=计算资源 modelarts.notebook=调试任务 modelarts.train_job=训练任务 @@ -830,6 +835,10 @@ modelarts.train_job.new_debug=新建调试任务 modelarts.train_job.new_train=新建训练任务 modelarts.train_job.config=配置信息 modelarts.train_job.new=新建训练任务 +modelarts.train_job.new_place=描述字数不超过256个字符 +modelarts.model_name=模型名称 +modelarts.model_size=模型大小 +modelarts.import_model=导入模型 modelarts.train_job.new_place=描述字数不超过255个字符 modelarts.modify=修改 modelarts.current_version=当前版本 @@ -848,7 +857,7 @@ modelarts.train_job.description=任务描述 modelarts.train_job.parameter_setting=参数设置 modelarts.train_job.parameter_setting_info=参数信息 modelarts.train_job.fast_parameter_setting=一键式参数配置 -modelarts.train_job.fast_parameter_setting_config=如您已保存过参数配置,可单击 +modelarts.train_job.fast_parameter_setting_config=如您已保存过参数配置,可单击 
modelarts.train_job.fast_parameter_setting_config_link=这里 modelarts.train_job.frames=常用框架 modelarts.train_job.algorithm_origin=算法来源 @@ -886,6 +895,18 @@ modelarts.train_job_para_admin=任务参数管理 modelarts.train_job_para.edit=编辑 modelarts.train_job_para.connfirm=确定 +model.manage.import_new_model=导入新模型 +model.manage.create_error=相同的名称和版本的模型已经存在。 +model.manage.model_name = 模型名称 +model.manage.version = 版本 +model.manage.label = 标签 +model.manage.size = 大小 +model.manage.create_time = 创建时间 +model.manage.description = 描述 +model.manage.Accuracy = 准确率 +model.manage.F1 = F1值 +model.manage.Precision = 精确率 +model.manage.Recall = 召回率 template.items=模板选项 template.git_content=Git数据(默认分支) @@ -1565,6 +1586,7 @@ settings.external_wiki_url_error=外部百科链接无效 settings.external_wiki_url_desc=当点击任务标签时,访问者将被重定向到外部任务系统的URL。 settings.dataset_desc=启用数据集 settings.cloudbrain_desc = 启用云脑 +settings.model_desc = 启用模型管理 settings.issues_desc=启用任务系统 settings.use_internal_issue_tracker=使用内置的轻量级任务管理系统 settings.use_external_issue_tracker=使用外部的任务管理系统 @@ -1965,7 +1987,7 @@ team_unit_desc=允许访问项目单元 team_unit_disabled=(已禁用) form.name_reserved=组织名称 '%s' 是被保留的。 -form.name_pattern_not_allowed=项目名称中不允许使用 "%s"。 +form.name_pattern_not_allowed=组织名称中不允许使用 "%s"。 form.create_org_not_allowed=此账号禁止创建组织 settings=组织设置 diff --git a/public/img/search.svg b/public/img/search.svg new file mode 100644 index 000000000..ec91b07dd --- /dev/null +++ b/public/img/search.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/routers/api/v1/repo/branch.go b/routers/api/v1/repo/branch.go old mode 100644 new mode 100755 index 57c74d7da..f4d1c924c --- a/routers/api/v1/repo/branch.go +++ b/routers/api/v1/repo/branch.go @@ -204,7 +204,7 @@ func ListBranches(ctx *context.APIContext) { // "200": // "$ref": "#/responses/BranchList" - branches, err := repo_module.GetBranches(ctx.Repo.Repository) + branches, _, err := repo_module.GetBranches(ctx.Repo.Repository,0,0) if err != nil { ctx.Error(http.StatusInternalServerError, "GetBranches", err) return diff --git a/routers/repo/ai_model_manage.go b/routers/repo/ai_model_manage.go new file mode 100644 index 000000000..c6ec6c6ae --- /dev/null +++ b/routers/repo/ai_model_manage.go @@ -0,0 +1,513 @@ +package repo + +import ( + "archive/zip" + "encoding/json" + "errors" + "fmt" + "net/http" + "path" + "strings" + + "code.gitea.io/gitea/models" + "code.gitea.io/gitea/modules/context" + "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/setting" + "code.gitea.io/gitea/modules/storage" + uuid "github.com/satori/go.uuid" +) + +const ( + Model_prefix = "aimodels/" + tplModelManageIndex = "repo/modelmanage/index" + tplModelManageDownload = "repo/modelmanage/download" + tplModelInfo = "repo/modelmanage/showinfo" + MODEL_LATEST = 1 + MODEL_NOT_LATEST = 0 +) + +func saveModelByParameters(jobId string, versionName string, name string, version string, label string, description string, ctx *context.Context) error { + aiTask, err := models.GetCloudbrainByJobIDAndVersionName(jobId, versionName) + //aiTask, err := models.GetCloudbrainByJobID(jobId) + if err != nil { + log.Info("query task error." 
+ err.Error()) + return err + } + + uuid := uuid.NewV4() + id := uuid.String() + modelPath := id + var lastNewModelId string + var modelSize int64 + cloudType := models.TypeCloudBrainTwo + + log.Info("find task name:" + aiTask.JobName) + aimodels := models.QueryModelByName(name, aiTask.RepoID) + if len(aimodels) > 0 { + for _, model := range aimodels { + if model.Version == version { + return errors.New(ctx.Tr("repo.model.manage.create_error")) + } + if model.New == MODEL_LATEST { + lastNewModelId = model.ID + } + } + } + cloudType = aiTask.Type + //download model zip //train type + if cloudType == models.TypeCloudBrainTwo { + modelPath, modelSize, err = downloadModelFromCloudBrainTwo(id, aiTask.JobName, "") + if err != nil { + log.Info("download model from CloudBrainTwo faild." + err.Error()) + return err + } + } + accuracy := make(map[string]string) + accuracy["F1"] = "" + accuracy["Recall"] = "" + accuracy["Accuracy"] = "" + accuracy["Precision"] = "" + accuracyJson, _ := json.Marshal(accuracy) + log.Info("accuracyJson=" + string(accuracyJson)) + aiTaskJson, _ := json.Marshal(aiTask) + + //taskConfigInfo,err := models.GetCloudbrainByJobIDAndVersionName(jobId,aiTask.VersionName) + model := &models.AiModelManage{ + ID: id, + Version: version, + VersionCount: len(aimodels) + 1, + Label: label, + Name: name, + Description: description, + New: MODEL_LATEST, + Type: cloudType, + Path: modelPath, + Size: modelSize, + AttachmentId: aiTask.Uuid, + RepoId: aiTask.RepoID, + UserId: ctx.User.ID, + UserName: ctx.User.Name, + UserRelAvatarLink: ctx.User.RelAvatarLink(), + CodeBranch: aiTask.BranchName, + CodeCommitID: aiTask.CommitID, + Engine: aiTask.EngineID, + TrainTaskInfo: string(aiTaskJson), + Accuracy: string(accuracyJson), + } + + err = models.SaveModelToDb(model) + if err != nil { + return err + } + if len(lastNewModelId) > 0 { + //udpate status and version count + models.ModifyModelNewProperty(lastNewModelId, MODEL_NOT_LATEST, 0) + } + + log.Info("save model end.") + + return nil +} + +func SaveModel(ctx *context.Context) { + log.Info("save model start.") + JobId := ctx.Query("JobId") + VersionName := ctx.Query("VersionName") + name := ctx.Query("Name") + version := ctx.Query("Version") + label := ctx.Query("Label") + description := ctx.Query("Description") + + if !ctx.Repo.CanWrite(models.UnitTypeModelManage) { + ctx.ServerError("No right.", errors.New(ctx.Tr("repo.model_noright"))) + return + } + + if JobId == "" || VersionName == "" { + ctx.Error(500, fmt.Sprintf("JobId or VersionName is null.")) + return + } + + if name == "" || version == "" { + ctx.Error(500, fmt.Sprintf("name or version is null.")) + return + } + + err := saveModelByParameters(JobId, VersionName, name, version, label, description, ctx) + + if err != nil { + log.Info("save model error." + err.Error()) + ctx.Error(500, fmt.Sprintf("save model error. 
%v", err)) + return + } + + log.Info("save model end.") +} + +func downloadModelFromCloudBrainTwo(modelUUID string, jobName string, parentDir string) (string, int64, error) { + + objectkey := strings.TrimPrefix(path.Join(setting.TrainJobModelPath, jobName, setting.OutPutPath, parentDir), "/") + modelDbResult, err := storage.GetOneLevelAllObjectUnderDir(setting.Bucket, objectkey, "") + log.Info("bucket=" + setting.Bucket + " objectkey=" + objectkey) + if err != nil { + log.Info("get TrainJobListModel failed:", err) + return "", 0, err + } + if len(modelDbResult) == 0 { + return "", 0, errors.New("cannot create model, as model is empty.") + } + + prefix := objectkey + "/" + destKeyNamePrefix := Model_prefix + models.AttachmentRelativePath(modelUUID) + "/" + + size, err := storage.ObsCopyManyFile(setting.Bucket, prefix, setting.Bucket, destKeyNamePrefix) + + dataActualPath := setting.Bucket + "/" + destKeyNamePrefix + return dataActualPath, size, nil +} + +func DeleteModel(ctx *context.Context) { + log.Info("delete model start.") + id := ctx.Query("ID") + err := deleteModelByID(ctx, id) + if err != nil { + ctx.JSON(500, err.Error()) + } else { + ctx.JSON(200, map[string]string{ + "result_code": "0", + }) + } +} +func isCanDeleteOrDownload(ctx *context.Context, model *models.AiModelManage) bool { + if ctx.User.IsAdmin || ctx.User.ID == model.UserId { + return true + } + if ctx.Repo.IsOwner() { + return true + } + return false +} + +func deleteModelByID(ctx *context.Context, id string) error { + log.Info("delete model start. id=" + id) + model, err := models.QueryModelById(id) + if !isCanDeleteOrDownload(ctx, model) { + return errors.New(ctx.Tr("repo.model_noright")) + } + if err == nil { + log.Info("bucket=" + setting.Bucket + " path=" + model.Path) + if strings.HasPrefix(model.Path, setting.Bucket+"/"+Model_prefix) { + err := storage.ObsRemoveObject(setting.Bucket, model.Path[len(setting.Bucket)+1:]) + if err != nil { + log.Info("Failed to delete model. 
id=" + id) + return err + } + } + err = models.DeleteModelById(id) + if err == nil { //find a model to change new + aimodels := models.QueryModelByName(model.Name, model.RepoId) + if model.New == MODEL_LATEST { + if len(aimodels) > 0 { + //udpate status and version count + models.ModifyModelNewProperty(aimodels[0].ID, MODEL_LATEST, len(aimodels)) + } + } else { + for _, tmpModel := range aimodels { + if tmpModel.New == MODEL_LATEST { + models.ModifyModelNewProperty(tmpModel.ID, MODEL_LATEST, len(aimodels)) + break + } + } + } + } + } + return err +} + +func QueryModelByParameters(repoId int64, page int) ([]*models.AiModelManage, int64, error) { + + return models.QueryModel(&models.AiModelQueryOptions{ + ListOptions: models.ListOptions{ + Page: page, + PageSize: setting.UI.IssuePagingNum, + }, + RepoID: repoId, + Type: -1, + New: MODEL_LATEST, + }) +} + +func DownloadMultiModelFile(ctx *context.Context) { + log.Info("DownloadMultiModelFile start.") + id := ctx.Query("ID") + log.Info("id=" + id) + task, err := models.QueryModelById(id) + if err != nil { + log.Error("no such model!", err.Error()) + ctx.ServerError("no such model:", err) + return + } + if !isCanDeleteOrDownload(ctx, task) { + ctx.ServerError("no right.", errors.New(ctx.Tr("repo.model_noright"))) + return + } + + path := Model_prefix + models.AttachmentRelativePath(id) + "/" + + allFile, err := storage.GetAllObjectByBucketAndPrefix(setting.Bucket, path) + if err == nil { + //count++ + models.ModifyModelDownloadCount(id) + + returnFileName := task.Name + "_" + task.Version + ".zip" + ctx.Resp.Header().Set("Content-Disposition", "attachment; filename="+returnFileName) + ctx.Resp.Header().Set("Content-Type", "application/octet-stream") + w := zip.NewWriter(ctx.Resp) + defer w.Close() + for _, oneFile := range allFile { + if oneFile.IsDir { + log.Info("zip dir name:" + oneFile.FileName) + } else { + log.Info("zip file name:" + oneFile.FileName) + fDest, err := w.Create(oneFile.FileName) + if err != nil { + log.Info("create zip entry error, download file failed: %s\n", err.Error()) + ctx.ServerError("download file failed:", err) + return + } + body, err := storage.ObsDownloadAFile(setting.Bucket, path+oneFile.FileName) + if err != nil { + log.Info("download file failed: %s\n", err.Error()) + ctx.ServerError("download file failed:", err) + return + } else { + defer body.Close() + p := make([]byte, 1024) + var readErr error + var readCount int + // 读取对象内容 + for { + readCount, readErr = body.Read(p) + if readCount > 0 { + fDest.Write(p[:readCount]) + } + if readErr != nil { + break + } + } + } + } + } + } else { + log.Info("error,msg=" + err.Error()) + ctx.ServerError("no file to download.", err) + } +} + +func QueryTrainJobVersionList(ctx *context.Context) { + log.Info("query train job version list. start.") + JobID := ctx.Query("JobID") + + VersionListTasks, count, err := models.QueryModelTrainJobVersionList(JobID) + + log.Info("query return count=" + fmt.Sprint(count)) + + if err != nil { + ctx.ServerError("QueryTrainJobList:", err) + } else { + ctx.JSON(200, VersionListTasks) + } +} + +func QueryTrainJobList(ctx *context.Context) { + log.Info("query train job list. 
start.") + repoId := ctx.QueryInt64("repoId") + + VersionListTasks, count, err := models.QueryModelTrainJobList(repoId) + log.Info("query return count=" + fmt.Sprint(count)) + + if err != nil { + ctx.ServerError("QueryTrainJobList:", err) + } else { + ctx.JSON(200, VersionListTasks) + } + +} + +func DownloadSingleModelFile(ctx *context.Context) { + log.Info("DownloadSingleModelFile start.") + id := ctx.Params(":ID") + parentDir := ctx.Query("parentDir") + fileName := ctx.Query("fileName") + path := Model_prefix + models.AttachmentRelativePath(id) + "/" + parentDir + fileName + + if setting.PROXYURL != "" { + body, err := storage.ObsDownloadAFile(setting.Bucket, path) + if err != nil { + log.Info("download error.") + } else { + //count++ + models.ModifyModelDownloadCount(id) + defer body.Close() + ctx.Resp.Header().Set("Content-Disposition", "attachment; filename="+fileName) + ctx.Resp.Header().Set("Content-Type", "application/octet-stream") + p := make([]byte, 1024) + var readErr error + var readCount int + // 读取对象内容 + for { + readCount, readErr = body.Read(p) + if readCount > 0 { + ctx.Resp.Write(p[:readCount]) + //fmt.Printf("%s", p[:readCount]) + } + if readErr != nil { + break + } + } + } + } else { + url, err := storage.GetObsCreateSignedUrlByBucketAndKey(setting.Bucket, path) + if err != nil { + log.Error("GetObsCreateSignedUrl failed: %v", err.Error(), ctx.Data["msgID"]) + ctx.ServerError("GetObsCreateSignedUrl", err) + return + } + //count++ + models.ModifyModelDownloadCount(id) + http.Redirect(ctx.Resp, ctx.Req.Request, url, http.StatusMovedPermanently) + } +} + +func ShowModelInfo(ctx *context.Context) { + ctx.Data["ID"] = ctx.Query("ID") + ctx.Data["name"] = ctx.Query("name") + ctx.Data["isModelManage"] = true + ctx.HTML(200, tplModelInfo) +} + +func ShowSingleModel(ctx *context.Context) { + name := ctx.Query("name") + + log.Info("Show single ModelInfo start.name=" + name) + models := models.QueryModelByName(name, ctx.Repo.Repository.ID) + + ctx.JSON(http.StatusOK, models) +} + +func ShowOneVersionOtherModel(ctx *context.Context) { + repoId := ctx.Repo.Repository.ID + name := ctx.Query("name") + aimodels := models.QueryModelByName(name, repoId) + for _, model := range aimodels { + log.Info("model=" + model.Name) + log.Info("model.UserId=" + fmt.Sprint(model.UserId)) + model.IsCanOper = isOper(ctx, model.UserId) + } + if len(aimodels) > 0 { + ctx.JSON(200, aimodels[1:]) + } else { + ctx.JSON(200, aimodels) + } +} + +func ShowModelTemplate(ctx *context.Context) { + ctx.Data["isModelManage"] = true + ctx.HTML(200, tplModelManageIndex) +} + +func isQueryRight(ctx *context.Context) bool { + if ctx.Repo.Repository.IsPrivate { + if ctx.Repo.CanRead(models.UnitTypeModelManage) || ctx.User.IsAdmin || ctx.Repo.IsAdmin() || ctx.Repo.IsOwner() { + return true + } + return false + } else { + return true + } +} + +func isOper(ctx *context.Context, modelUserId int64) bool { + if ctx.User == nil { + return false + } + if ctx.User.IsAdmin || ctx.Repo.IsAdmin() || ctx.Repo.IsOwner() || ctx.User.ID == modelUserId { + return true + } + return false +} + +func ShowModelPageInfo(ctx *context.Context) { + log.Info("ShowModelInfo start.") + if !isQueryRight(ctx) { + ctx.ServerError("no right.", errors.New(ctx.Tr("repo.model_noright"))) + return + } + page := ctx.QueryInt("page") + if page <= 0 { + page = 1 + } + repoId := ctx.Repo.Repository.ID + Type := -1 + modelResult, count, err := models.QueryModel(&models.AiModelQueryOptions{ + ListOptions: models.ListOptions{ + Page: page, + PageSize: 
setting.UI.IssuePagingNum, + }, + RepoID: repoId, + Type: Type, + New: MODEL_LATEST, + }) + if err != nil { + ctx.ServerError("Cloudbrain", err) + return + } + + for _, model := range modelResult { + log.Info("model=" + model.Name) + log.Info("model.UserId=" + fmt.Sprint(model.UserId)) + model.IsCanOper = isOper(ctx, model.UserId) + } + + mapInterface := make(map[string]interface{}) + mapInterface["data"] = modelResult + mapInterface["count"] = count + ctx.JSON(http.StatusOK, mapInterface) +} + +func ModifyModel(id string, description string) error { + err := models.ModifyModelDescription(id, description) + if err == nil { + log.Info("modify success.") + } else { + log.Info("Failed to modify.id=" + id + " desc=" + description + " error:" + err.Error()) + } + return err +} + +func ModifyModelInfo(ctx *context.Context) { + log.Info("modify model start.") + id := ctx.Query("ID") + description := ctx.Query("Description") + + task, err := models.QueryModelById(id) + if err != nil { + log.Error("no such model!", err.Error()) + ctx.ServerError("no such model:", err) + return + } + if !isCanDeleteOrDownload(ctx, task) { + ctx.ServerError("no right.", errors.New(ctx.Tr("repo.model_noright"))) + return + } + + err = ModifyModel(id, description) + + if err != nil { + log.Info("modify error," + err.Error()) + ctx.ServerError("error.", err) + } else { + ctx.JSON(200, "success") + } + +} diff --git a/routers/repo/branch.go b/routers/repo/branch.go old mode 100644 new mode 100755 index e7eac04bc..c8e492373 --- a/routers/repo/branch.go +++ b/routers/repo/branch.go @@ -181,7 +181,7 @@ func deleteBranch(ctx *context.Context, branchName string) error { } func loadBranches(ctx *context.Context) []*Branch { - rawBranches, err := repo_module.GetBranches(ctx.Repo.Repository) + rawBranches, _, err := repo_module.GetBranches(ctx.Repo.Repository, 0, 0) if err != nil { ctx.ServerError("GetBranches", err) return nil diff --git a/routers/repo/cloudbrain.go b/routers/repo/cloudbrain.go index bf0fffc18..256bc9e6d 100755 --- a/routers/repo/cloudbrain.go +++ b/routers/repo/cloudbrain.go @@ -8,7 +8,6 @@ import ( "io" "net/http" "os" - "os/exec" "regexp" "sort" "strconv" @@ -71,15 +70,10 @@ func CloudBrainIndex(ctx *context.Context) { return } - timestamp := time.Now().Unix() for i, task := range ciTasks { - if task.Status == string(models.JobRunning) && (timestamp-int64(task.Cloudbrain.CreatedUnix) > 10) { - ciTasks[i].CanDebug = true - } else { - ciTasks[i].CanDebug = false - } + ciTasks[i].CanDebug = cloudbrain.CanCreateOrDebugJob(ctx) + ciTasks[i].CanDel = cloudbrain.CanDeleteJob(ctx, &task.Cloudbrain) - ciTasks[i].CanDel = models.CanDelJob(ctx.IsSigned, ctx.User, task) } pager := context.NewPagination(int(count), setting.UI.IssuePagingNum, page, 5) @@ -88,6 +82,7 @@ func CloudBrainIndex(ctx *context.Context) { ctx.Data["PageIsCloudBrain"] = true ctx.Data["Tasks"] = ciTasks + ctx.Data["CanCreate"] = cloudbrain.CanCreateOrDebugJob(ctx) ctx.HTML(200, tplCloudBrainIndex) } @@ -216,7 +211,22 @@ func CloudBrainCreate(ctx *context.Context, form auth.CreateCloudBrainForm) { return } - _, err := models.GetCloudbrainByName(jobName) + count, err := models.GetCloudbrainCountByUserID(ctx.User.ID) + if err != nil { + log.Error("GetCloudbrainCountByUserID failed:%v", err, ctx.Data["MsgID"]) + cloudBrainNewDataPrepare(ctx) + ctx.RenderWithErr("system error", tplCloudBrainNew, &form) + return + } else { + if count >= 1 { + log.Error("the user already has running or waiting task", ctx.Data["MsgID"]) + cloudBrainNewDataPrepare(ctx) + 
ctx.RenderWithErr("you have already a running or waiting task, can not create more", tplCloudBrainNew, &form) + return + } + } + + _, err = models.GetCloudbrainByName(jobName) if err == nil { log.Error("the job name did already exist", ctx.Data["MsgID"]) cloudBrainNewDataPrepare(ctx) @@ -232,14 +242,11 @@ func CloudBrainCreate(ctx *context.Context, form auth.CreateCloudBrainForm) { } repo := ctx.Repo.Repository downloadCode(repo, codePath) + uploadCodeToMinio(codePath + "/", jobName, "/code/") - modelPath := setting.JobPath + jobName + cloudbrain.ModelMountPath - err = os.MkdirAll(modelPath, os.ModePerm) - if err != nil { - cloudBrainNewDataPrepare(ctx) - ctx.RenderWithErr(err.Error(), tplCloudBrainNew, &form) - return - } + modelPath := setting.JobPath + jobName + cloudbrain.ModelMountPath + "/" + mkModelPath(modelPath) + uploadCodeToMinio(modelPath, jobName, cloudbrain.ModelMountPath + "/") benchmarkPath := setting.JobPath + jobName + cloudbrain.BenchMarkMountPath if setting.IsBenchmarkEnabled && jobType == string(models.JobTypeBenchmark) { @@ -249,20 +256,25 @@ func CloudBrainCreate(ctx *context.Context, form auth.CreateCloudBrainForm) { gpuType = gpuInfo.Value } } - downloadRateCode(repo, jobName, setting.BenchmarkCode, benchmarkPath, form.BenchmarkCategory, gpuType) + downloadRateCode(repo, jobName, setting.BenchmarkOwner, setting.BrainScoreName, benchmarkPath, form.BenchmarkCategory, gpuType) + uploadCodeToMinio(benchmarkPath + "/", jobName, cloudbrain.BenchMarkMountPath + "/") } snn4imagenetPath := setting.JobPath + jobName + cloudbrain.Snn4imagenetMountPath if setting.IsSnn4imagenetEnabled && jobType == string(models.JobTypeSnn4imagenet) { - downloadRateCode(repo, jobName, setting.Snn4imagenetCode, snn4imagenetPath, "", "") + downloadRateCode(repo, jobName, setting.Snn4imagenetOwner, setting.Snn4imagenetName, snn4imagenetPath, "", "") + uploadCodeToMinio(snn4imagenetPath + "/", jobName, cloudbrain.Snn4imagenetMountPath + "/") } brainScorePath := setting.JobPath + jobName + cloudbrain.BrainScoreMountPath if setting.IsBrainScoreEnabled && jobType == string(models.JobTypeBrainScore) { - downloadRateCode(repo, jobName, setting.BrainScoreCode, brainScorePath, "", "") + downloadRateCode(repo, jobName, setting.BrainScoreOwner, setting.BrainScoreName, brainScorePath, "", "") + uploadCodeToMinio(brainScorePath + "/", jobName, cloudbrain.BrainScoreMountPath + "/") } - err = cloudbrain.GenerateTask(ctx, jobName, image, command, uuid, codePath, modelPath, benchmarkPath, snn4imagenetPath, brainScorePath, jobType, gpuQueue, resourceSpecId) + err = cloudbrain.GenerateTask(ctx, jobName, image, command, uuid, codePath, getMinioPath(jobName, cloudbrain.ModelMountPath + "/"), + getMinioPath(jobName, cloudbrain.BenchMarkMountPath + "/"), getMinioPath(jobName, cloudbrain.Snn4imagenetMountPath + "/"), + getMinioPath(jobName, cloudbrain.BrainScoreMountPath + "/"), jobType, gpuQueue, resourceSpecId) if err != nil { cloudBrainNewDataPrepare(ctx) ctx.RenderWithErr(err.Error(), tplCloudBrainNew, &form) @@ -323,7 +335,7 @@ func CloudBrainDebug(ctx *context.Context) { var jobID = ctx.Params(":jobid") if !ctx.IsSigned { log.Error("the user has not signed in") - ctx.Error(http.StatusForbidden, "","the user has not signed in") + ctx.Error(http.StatusForbidden, "", "the user has not signed in") return } task, err := models.GetCloudbrainByJobID(jobID) @@ -340,7 +352,7 @@ func CloudBrainCommitImage(ctx *context.Context, form auth.CommitImageCloudBrain var jobID = ctx.Params(":jobid") if !ctx.IsSigned { 
log.Error("the user has not signed in") - ctx.Error(http.StatusForbidden, "","the user has not signed in") + ctx.Error(http.StatusForbidden, "", "the user has not signed in") return } task, err := models.GetCloudbrainByJobID(jobID) @@ -437,14 +449,22 @@ func StopJobs(cloudBrains []*models.Cloudbrain) { logErrorAndUpdateJobStatus(err, taskInfo) } else { - param := models.NotebookAction{ - Action: models.ActionStop, + if taskInfo.JobType == string(models.JobTypeTrain) { + err := retry(3, time.Second*30, func() error { + _, err := modelarts.StopTrainJob(taskInfo.JobID, strconv.FormatInt(taskInfo.VersionID, 10)) + return err + }) + logErrorAndUpdateJobStatus(err, taskInfo) + } else { + param := models.NotebookAction{ + Action: models.ActionStop, + } + err := retry(3, time.Second*30, func() error { + _, err := modelarts.StopJob(taskInfo.JobID, param) + return err + }) + logErrorAndUpdateJobStatus(err, taskInfo) } - err := retry(3, time.Second*30, func() error { - _, err := modelarts.StopJob(taskInfo.JobID, param) - return err - }) - logErrorAndUpdateJobStatus(err, taskInfo) } } @@ -513,10 +533,10 @@ func CloudBrainShowModels(ctx *context.Context) { } //get dirs - dirs, err := getModelDirs(task.JobName, parentDir) + dirs, err := GetModelDirs(task.JobName, parentDir) if err != nil { - log.Error("getModelDirs failed:%v", err.Error(), ctx.Data["msgID"]) - ctx.ServerError("getModelDirs failed:", err) + log.Error("GetModelDirs failed:%v", err.Error(), ctx.Data["msgID"]) + ctx.ServerError("GetModelDirs failed:", err) return } @@ -576,9 +596,9 @@ func getImages(ctx *context.Context, imageType string) { log.Info("Get images end") } -func getModelDirs(jobName string, parentDir string) (string, error) { +func GetModelDirs(jobName string, parentDir string) (string, error) { var req string - modelActualPath := setting.JobPath + jobName + "/model/" + modelActualPath := getMinioPath(jobName, cloudbrain.ModelMountPath + "/") if parentDir == "" { req = "baseDir=" + modelActualPath } else { @@ -588,6 +608,10 @@ func getModelDirs(jobName string, parentDir string) (string, error) { return getDirs(req) } +func getMinioPath(jobName, suffixPath string) string { + return setting.Attachment.Minio.RealPath + setting.Attachment.Minio.Bucket + "/" + setting.CBCodePathPrefix + jobName + suffixPath +} + func CloudBrainDownloadModel(ctx *context.Context) { parentDir := ctx.Query("parentDir") fileName := ctx.Query("fileName") @@ -669,19 +693,21 @@ func downloadCode(repo *models.Repository, codePath string) error { return nil } -func downloadRateCode(repo *models.Repository, taskName, gitPath, codePath, benchmarkCategory, gpuType string) error { +func downloadRateCode(repo *models.Repository, taskName, rateOwnerName, rateRepoName, codePath, benchmarkCategory, gpuType string) error { err := os.MkdirAll(codePath, os.ModePerm) if err != nil { log.Error("mkdir codePath failed", err.Error()) return err } - command := "git clone " + gitPath + " " + codePath - cmd := exec.Command("/bin/bash", "-c", command) - output, err := cmd.Output() - log.Info(string(output)) + repoExt, err := models.GetRepositoryByOwnerAndName(rateOwnerName, rateRepoName) if err != nil { - log.Error("exec.Command(%s) failed:%v", command, err) + log.Error("GetRepositoryByOwnerAndName(%s) failed", rateRepoName, err.Error()) + return err + } + + if err := git.Clone(repoExt.RepoPath(), codePath, git.CloneRepoOptions{}); err != nil { + log.Error("Failed to clone repository: %s (%v)", repoExt.FullName(), err) return err } @@ -716,6 +742,59 @@ func 
downloadRateCode(repo *models.Repository, taskName, gitPath, codePath, benc return nil } +func uploadCodeToMinio(codePath, jobName, parentDir string) error { + files, err := readDir(codePath) + if err != nil { + log.Error("readDir(%s) failed: %s", codePath, err.Error()) + return err + } + + for _, file := range files { + if file.IsDir() { + if err = uploadCodeToMinio(codePath+file.Name()+"/", jobName, parentDir+file.Name()+"/"); err != nil { + log.Error("uploadCodeToMinio(%s) failed: %s", file.Name(), err.Error()) + return err + } + } else { + destObject := setting.CBCodePathPrefix + jobName + parentDir + file.Name() + sourceFile := codePath + file.Name() + err = storage.Attachments.UploadObject(destObject, sourceFile) + if err != nil { + log.Error("UploadObject(%s) failed: %s", file.Name(), err.Error()) + return err + } + } + } + + return nil +} + +func mkModelPath(modelPath string) error { + err := os.MkdirAll(modelPath, os.ModePerm) + if err != nil { + log.Error("MkdirAll(%s) failed:%v", modelPath, err) + return err + } + + fileName := modelPath + "README" + f, err := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.ModePerm) + if err != nil { + log.Error("OpenFile failed", err.Error()) + return err + } + + defer f.Close() + + _, err = f.WriteString("You can put the model file into this directory and download it by the web page.") + if err != nil { + log.Error("WriteString failed", err.Error()) + return err + } + + return nil +} + + func SyncCloudbrainStatus() { cloudBrains, err := models.GetCloudBrainUnStoppedJob() if err != nil { @@ -737,10 +816,24 @@ func SyncCloudbrainStatus() { taskRes, _ := models.ConvertToTaskPod(taskRoles[cloudbrain.SubTaskName].(map[string]interface{})) task.Status = taskRes.TaskStatuses[0].State if task.Status != string(models.JobWaiting) { + task.Duration = time.Now().Unix() - taskRes.TaskStatuses[0].StartAt.Unix() err = models.UpdateJob(task) if err != nil { log.Error("UpdateJob(%s) failed:%v", task.JobName, err) - continue + } + + if task.Duration >= setting.MaxDuration { + log.Info("begin to stop job(%s), because of the duration", task.JobName) + err = cloudbrain.StopJob(task.JobID) + if err != nil { + log.Error("StopJob(%s) failed:%v", task.JobName, err) + continue + } + task.Status = string(models.JobStopped) + err = models.UpdateJob(task) + if err != nil { + log.Error("UpdateJob(%s) failed:%v", task.JobName, err) + } } } } diff --git a/routers/repo/compare.go b/routers/repo/compare.go old mode 100644 new mode 100755 index 97bb5e6b1..babe416a7 --- a/routers/repo/compare.go +++ b/routers/repo/compare.go @@ -507,7 +507,7 @@ func getBranchesForRepo(user *models.User, repo *models.Repository) (bool, []str } defer gitRepo.Close() - branches, err := gitRepo.GetBranches() + branches, _, err := gitRepo.GetBranches(0, 0) if err != nil { return false, nil, err } @@ -528,7 +528,7 @@ func CompareDiff(ctx *context.Context) { } if ctx.Data["PageIsComparePull"] == true { - headBranches, err := headGitRepo.GetBranches() + headBranches, _, err := headGitRepo.GetBranches(0,0) if err != nil { ctx.ServerError("GetBranches", err) return diff --git a/routers/repo/dir.go b/routers/repo/dir.go index 406f3dc73..81549e76a 100755 --- a/routers/repo/dir.go +++ b/routers/repo/dir.go @@ -12,7 +12,6 @@ import ( "code.gitea.io/gitea/modules/base" "code.gitea.io/gitea/modules/context" "code.gitea.io/gitea/modules/log" - "code.gitea.io/gitea/modules/obs" "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/storage" ) @@ -70,40 +69,10 @@ func 
DeleteAllUnzipFile(attachment *models.Attachment, parentDir string) { } } if attachment.Type == models.TypeCloudBrainTwo { - - input := &obs.ListObjectsInput{} - input.Bucket = setting.Bucket - // 设置每页100个对象 - input.MaxKeys = 100 - input.Prefix = setting.BasePath + attachment.RelativePath() + attachment.UUID - index := 1 - log.Info("prefix=" + input.Prefix) - for { - output, err := storage.ObsCli.ListObjects(input) - if err == nil { - log.Info("Page:%d\n", index) - index++ - for _, val := range output.Contents { - log.Info("delete obs file:" + val.Key) - delObj := &obs.DeleteObjectInput{} - delObj.Bucket = setting.Bucket - delObj.Key = val.Key - storage.ObsCli.DeleteObject(delObj) - } - if output.IsTruncated { - input.Marker = output.NextMarker - } else { - break - } - } else { - if obsError, ok := err.(obs.ObsError); ok { - log.Info("Code:%s\n", obsError.Code) - log.Info("Message:%s\n", obsError.Message) - } - break - } + err := storage.ObsRemoveObject(setting.Bucket, setting.BasePath+attachment.RelativePath()+attachment.UUID) + if err != nil { + log.Info("delete file error.") } - } } diff --git a/routers/repo/http.go b/routers/repo/http.go index ad2abf567..87406a2c3 100644 --- a/routers/repo/http.go +++ b/routers/repo/http.go @@ -257,7 +257,6 @@ func HTTP(ctx *context.Context) { models.EnvPusherID + fmt.Sprintf("=%d", authUser.ID), models.EnvIsDeployKey + "=false", } - if !authUser.KeepEmailPrivate { environ = append(environ, models.EnvPusherEmail+"="+authUser.Email) } @@ -559,6 +558,7 @@ func serviceRPC(h serviceHandler, service string) { if service == "receive-pack" { cmd.Env = append(os.Environ(), h.environ...) } + cmd.Stdout = h.w cmd.Stdin = reqBody cmd.Stderr = &stderr diff --git a/routers/repo/issue.go b/routers/repo/issue.go index 9483814d6..77ed0251d 100755 --- a/routers/repo/issue.go +++ b/routers/repo/issue.go @@ -424,7 +424,7 @@ func RetrieveRepoMetas(ctx *context.Context, repo *models.Repository, isPull boo return nil } - brs, err := ctx.Repo.GitRepo.GetBranches() + brs, _, err := ctx.Repo.GitRepo.GetBranches(0,0) if err != nil { ctx.ServerError("GetBranches", err) return nil diff --git a/routers/repo/modelarts.go b/routers/repo/modelarts.go index 7324bbbee..3994ba542 100755 --- a/routers/repo/modelarts.go +++ b/routers/repo/modelarts.go @@ -11,6 +11,8 @@ import ( "strings" "time" + "code.gitea.io/gitea/modules/cloudbrain" + "code.gitea.io/gitea/models" "code.gitea.io/gitea/modules/auth" "code.gitea.io/gitea/modules/base" @@ -67,11 +69,9 @@ func NotebookIndex(ctx *context.Context) { } for i, task := range ciTasks { - if task.Status == string(models.JobRunning) { - ciTasks[i].CanDebug = true - } else { - ciTasks[i].CanDebug = false - } + + ciTasks[i].CanDebug = cloudbrain.CanCreateOrDebugJob(ctx) + ciTasks[i].CanDel = cloudbrain.CanDeleteJob(ctx, &task.Cloudbrain) } pager := context.NewPagination(int(count), setting.UI.IssuePagingNum, page, 5) @@ -80,6 +80,7 @@ func NotebookIndex(ctx *context.Context) { ctx.Data["PageIsCloudBrain"] = true ctx.Data["Tasks"] = ciTasks + ctx.Data["CanCreate"] = cloudbrain.CanCreateOrDebugJob(ctx) ctx.HTML(200, tplModelArtsNotebookIndex) } @@ -115,7 +116,22 @@ func NotebookCreate(ctx *context.Context, form auth.CreateModelArtsNotebookForm) description := form.Description flavor := form.Flavor - err := modelarts.GenerateTask(ctx, jobName, uuid, description, flavor) + count, err := models.GetCloudbrainNotebookCountByUserID(ctx.User.ID) + if err != nil { + log.Error("GetCloudbrainNotebookCountByUserID failed:%v", err, ctx.Data["MsgID"]) + 
cloudBrainNewDataPrepare(ctx) + ctx.RenderWithErr("system error", tplModelArtsNotebookNew, &form) + return + } else { + if count >= 1 { + log.Error("the user already has running or waiting task", ctx.Data["MsgID"]) + cloudBrainNewDataPrepare(ctx) + ctx.RenderWithErr("you have already a running or waiting task, can not create more", tplModelArtsNotebookNew, &form) + return + } + } + + err = modelarts.GenerateTask(ctx, jobName, uuid, description, flavor) if err != nil { ctx.RenderWithErr(err.Error(), tplModelArtsNotebookNew, &form) return @@ -286,12 +302,18 @@ func TrainJobIndex(ctx *context.Context) { return } + for i, task := range tasks { + tasks[i].CanDel = cloudbrain.CanDeleteJob(ctx, &task.Cloudbrain) + tasks[i].CanModify = cloudbrain.CanModifyJob(ctx, &task.Cloudbrain) + } + pager := context.NewPagination(int(count), setting.UI.IssuePagingNum, page, 5) pager.SetDefaultParams(ctx) ctx.Data["Page"] = pager ctx.Data["PageIsCloudBrain"] = true ctx.Data["Tasks"] = tasks + ctx.Data["CanCreate"] = cloudbrain.CanCreateOrDebugJob(ctx) ctx.HTML(200, tplModelArtsTrainJobIndex) } @@ -360,18 +382,6 @@ func trainJobNewDataPrepare(ctx *context.Context) error { outputObsPath := "/" + setting.Bucket + modelarts.JobPath + jobName + modelarts.OutputPath ctx.Data["train_url"] = outputObsPath - - Branches, err := ctx.Repo.GitRepo.GetBranches() - if err != nil { - log.Error("GetBranches failed:%v", err) - ctx.ServerError("GetBranches error:", err) - return err - } - if Branches != nil { - ctx.Data["Branches"] = Branches - } - - ctx.Data["BranchesCount"] = len(Branches) ctx.Data["params"] = "" ctx.Data["branchName"] = ctx.Repo.BranchName @@ -442,14 +452,6 @@ func trainJobErrorNewDataPrepare(ctx *context.Context, form auth.CreateModelArts outputObsPath := "/" + setting.Bucket + modelarts.JobPath + jobName + modelarts.OutputPath ctx.Data["train_url"] = outputObsPath - Branches, err := ctx.Repo.GitRepo.GetBranches() - if err != nil { - ctx.ServerError("GetBranches error:", err) - return err - } - ctx.Data["Branches"] = Branches - ctx.Data["BranchesCount"] = len(Branches) - configList, err := getConfigList(modelarts.PerPage, 1, modelarts.SortByCreateTime, "desc", "", modelarts.ConfigTypeCustom) if err != nil { ctx.ServerError("getConfigList failed:", err) @@ -545,13 +547,13 @@ func trainJobNewVersionDataPrepare(ctx *context.Context) error { outputObsPath := "/" + setting.Bucket + modelarts.JobPath + jobName + modelarts.OutputPath ctx.Data["train_url"] = outputObsPath - Branches, err := ctx.Repo.GitRepo.GetBranches() + branches, _, err := ctx.Repo.GitRepo.GetBranches(0, 0) if err != nil { ctx.ServerError("GetBranches error:", err) return err } - ctx.Data["branches"] = Branches + ctx.Data["branches"] = branches ctx.Data["branch_name"] = task.BranchName ctx.Data["description"] = task.Description ctx.Data["boot_file"] = task.BootFile @@ -634,12 +636,12 @@ func versionErrorDataPrepare(ctx *context.Context, form auth.CreateModelArtsTrai outputObsPath := "/" + setting.Bucket + modelarts.JobPath + jobName + modelarts.OutputPath ctx.Data["train_url"] = outputObsPath - Branches, err := ctx.Repo.GitRepo.GetBranches() + branches, _, err := ctx.Repo.GitRepo.GetBranches(0, 0) if err != nil { ctx.ServerError("GetBranches error:", err) return err } - ctx.Data["branches"] = Branches + ctx.Data["branches"] = branches ctx.Data["description"] = form.Description ctx.Data["dataset_name"] = task.DatasetName ctx.Data["work_server_number"] = form.WorkServerNumber @@ -687,6 +689,21 @@ func TrainJobCreate(ctx *context.Context, form 
auth.CreateModelArtsTrainJobForm) VersionCount := modelarts.VersionCount EngineName := form.EngineName + count, err := models.GetCloudbrainTrainJobCountByUserID(ctx.User.ID) + if err != nil { + log.Error("GetCloudbrainTrainJobCountByUserID failed:%v", err, ctx.Data["MsgID"]) + trainJobErrorNewDataPrepare(ctx, form) + ctx.RenderWithErr("system error", tplModelArtsTrainJobNew, &form) + return + } else { + if count >= 1 { + log.Error("the user already has running or waiting task", ctx.Data["MsgID"]) + trainJobErrorNewDataPrepare(ctx, form) + ctx.RenderWithErr("you have already a running or waiting task, can not create more", tplModelArtsTrainJobNew, &form) + return + } + } + if err := paramCheckCreateTrainJob(form); err != nil { log.Error("paramCheckCreateTrainJob failed:(%v)", err) trainJobErrorNewDataPrepare(ctx, form) @@ -839,7 +856,7 @@ func TrainJobCreate(ctx *context.Context, form auth.CreateModelArtsTrainJobForm) return } - err := modelarts.GenerateTrainJob(ctx, req) + err = modelarts.GenerateTrainJob(ctx, req) if err != nil { log.Error("GenerateTrainJob failed:%v", err.Error()) trainJobErrorNewDataPrepare(ctx, form) @@ -853,6 +870,21 @@ func TrainJobCreateVersion(ctx *context.Context, form auth.CreateModelArtsTrainJ ctx.Data["PageIsTrainJob"] = true var jobID = ctx.Params(":jobid") + count, err := models.GetCloudbrainTrainJobCountByUserID(ctx.User.ID) + if err != nil { + log.Error("GetCloudbrainTrainJobCountByUserID failed:%v", err, ctx.Data["MsgID"]) + versionErrorDataPrepare(ctx, form) + ctx.RenderWithErr("system error", tplModelArtsTrainJobVersionNew, &form) + return + } else { + if count >= 1 { + log.Error("the user already has running or waiting task", ctx.Data["MsgID"]) + versionErrorDataPrepare(ctx, form) + ctx.RenderWithErr("you have already a running or waiting task, can not create more", tplModelArtsTrainJobVersionNew, &form) + return + } + } + latestTask, err := models.GetCloudbrainByJobIDAndIsLatestVersion(jobID, modelarts.IsLatestVersion) if err != nil { ctx.ServerError("GetCloudbrainByJobIDAndIsLatestVersion faild:", err) diff --git a/routers/repo/repo.go b/routers/repo/repo.go index 437521d5a..a182e9087 100644 --- a/routers/repo/repo.go +++ b/routers/repo/repo.go @@ -532,6 +532,7 @@ func Download(ctx *context.Context) { } ctx.Repo.Repository.IncreaseCloneCnt() + ctx.Repo.Repository.IncreaseGitCloneCnt() ctx.ServeFile(archivePath, ctx.Repo.Repository.Name+"-"+refName+ext) } diff --git a/routers/repo/setting.go b/routers/repo/setting.go index bf11f9e5a..f7da8f4a8 100644 --- a/routers/repo/setting.go +++ b/routers/repo/setting.go @@ -239,6 +239,18 @@ func SettingsPost(ctx *context.Context, form auth.RepoSettingForm) { deleteUnitTypes = append(deleteUnitTypes, models.UnitTypeCloudBrain) } + if form.EnableModelManager && !models.UnitTypeModelManage.UnitGlobalDisabled() { + units = append(units, models.RepoUnit{ + RepoID: repo.ID, + Type: models.UnitTypeModelManage, + Config: &models.ModelManageConfig{ + EnableModelManage: form.EnableModelManager, + }, + }) + } else if !models.UnitTypeModelManage.UnitGlobalDisabled() { + deleteUnitTypes = append(deleteUnitTypes, models.UnitTypeModelManage) + } + if form.EnableWiki && form.EnableExternalWiki && !models.UnitTypeExternalWiki.UnitGlobalDisabled() { if !validation.IsValidExternalURL(form.ExternalWikiURL) { ctx.Flash.Error(ctx.Tr("repo.settings.external_wiki_url_error")) diff --git a/routers/routes/routes.go b/routers/routes/routes.go index 67794bf88..a8f820dba 100755 --- a/routers/routes/routes.go +++ b/routers/routes/routes.go @@ 
-12,6 +12,8 @@ import ( "text/template" "time" + "code.gitea.io/gitea/modules/cloudbrain" + "code.gitea.io/gitea/routers/operation" "code.gitea.io/gitea/routers/private" @@ -612,6 +614,8 @@ func RegisterRoutes(m *macaron.Macaron) { reqRepoDatasetWriter := context.RequireRepoWriter(models.UnitTypeDatasets) reqRepoCloudBrainReader := context.RequireRepoReader(models.UnitTypeCloudBrain) reqRepoCloudBrainWriter := context.RequireRepoWriter(models.UnitTypeCloudBrain) + reqRepoModelManageReader := context.RequireRepoReader(models.UnitTypeModelManage) + reqRepoModelManageWriter := context.RequireRepoWriter(models.UnitTypeModelManage) //reqRepoBlockChainReader := context.RequireRepoReader(models.UnitTypeBlockChain) //reqRepoBlockChainWriter := context.RequireRepoWriter(models.UnitTypeBlockChain) @@ -957,26 +961,43 @@ func RegisterRoutes(m *macaron.Macaron) { m.Get("", reqRepoCloudBrainReader, repo.CloudBrainIndex) m.Group("/:jobid", func() { m.Get("", reqRepoCloudBrainReader, repo.CloudBrainShow) - m.Get("/debug", reqRepoCloudBrainReader, repo.CloudBrainDebug) - m.Post("/commit_image", reqRepoCloudBrainWriter, bindIgnErr(auth.CommitImageCloudBrainForm{}), repo.CloudBrainCommitImage) - m.Post("/stop", reqRepoCloudBrainWriter, repo.CloudBrainStop) - m.Post("/del", reqRepoCloudBrainWriter, repo.CloudBrainDel) + m.Get("/debug", reqRepoCloudBrainWriter, repo.CloudBrainDebug) + m.Post("/commit_image", cloudbrain.AdminOrOwnerOrJobCreaterRight, bindIgnErr(auth.CommitImageCloudBrainForm{}), repo.CloudBrainCommitImage) + m.Post("/stop", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.CloudBrainStop) + m.Post("/del", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.CloudBrainDel) m.Get("/rate", reqRepoCloudBrainReader, repo.GetRate) m.Get("/models", reqRepoCloudBrainReader, repo.CloudBrainShowModels) - m.Get("/download_model", reqRepoCloudBrainReader, repo.CloudBrainDownloadModel) + m.Get("/download_model", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.CloudBrainDownloadModel) }) - m.Get("/create", reqRepoCloudBrainReader, repo.CloudBrainNew) + m.Get("/create", reqRepoCloudBrainWriter, repo.CloudBrainNew) m.Post("/create", reqRepoCloudBrainWriter, bindIgnErr(auth.CreateCloudBrainForm{}), repo.CloudBrainCreate) }, context.RepoRef()) + m.Group("/modelmanage", func() { + m.Post("/create_model", reqRepoModelManageWriter, repo.SaveModel) + m.Delete("/delete_model", repo.DeleteModel) + m.Put("/modify_model", repo.ModifyModelInfo) + m.Get("/show_model", reqRepoModelManageReader, repo.ShowModelTemplate) + m.Get("/show_model_info", repo.ShowModelInfo) + m.Get("/show_model_info_api", repo.ShowSingleModel) + m.Get("/show_model_api", repo.ShowModelPageInfo) + m.Get("/show_model_child_api", repo.ShowOneVersionOtherModel) + m.Get("/query_train_job", reqRepoCloudBrainReader, repo.QueryTrainJobList) + m.Get("/query_train_job_version", reqRepoCloudBrainReader, repo.QueryTrainJobVersionList) + m.Group("/:ID", func() { + m.Get("", repo.ShowSingleModel) + m.Get("/downloadsingle", repo.DownloadSingleModelFile) + }) + m.Get("/downloadall", repo.DownloadMultiModelFile) + }, context.RepoRef()) m.Group("/modelarts", func() { m.Group("/notebook", func() { m.Get("", reqRepoCloudBrainReader, repo.NotebookIndex) m.Group("/:jobid", func() { m.Get("", reqRepoCloudBrainReader, repo.NotebookShow) - m.Get("/debug", reqRepoCloudBrainReader, repo.NotebookDebug) - m.Post("/stop", reqRepoCloudBrainWriter, repo.NotebookStop) - m.Post("/del", reqRepoCloudBrainWriter, repo.NotebookDel) + m.Get("/debug", reqRepoCloudBrainWriter, repo.NotebookDebug) + 
m.Post("/stop", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.NotebookStop) + m.Post("/del", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.NotebookDel) }) m.Get("/create", reqRepoCloudBrainWriter, repo.NotebookNew) m.Post("/create", reqRepoCloudBrainWriter, bindIgnErr(auth.CreateModelArtsNotebookForm{}), repo.NotebookCreate) @@ -986,13 +1007,13 @@ func RegisterRoutes(m *macaron.Macaron) { m.Get("", reqRepoCloudBrainReader, repo.TrainJobIndex) m.Group("/:jobid", func() { m.Get("", reqRepoCloudBrainReader, repo.TrainJobShow) - m.Post("/stop", reqRepoCloudBrainWriter, repo.TrainJobStop) - m.Post("/del", reqRepoCloudBrainWriter, repo.TrainJobDel) - m.Get("/model_download", reqRepoCloudBrainReader, repo.ModelDownload) - m.Get("/create_version", reqRepoCloudBrainReader, repo.TrainJobNewVersion) - m.Post("/create_version", reqRepoCloudBrainWriter, bindIgnErr(auth.CreateModelArtsTrainJobForm{}), repo.TrainJobCreateVersion) + m.Post("/stop", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.TrainJobStop) + m.Post("/del", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.TrainJobDel) + m.Get("/model_download", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.ModelDownload) + m.Get("/create_version", cloudbrain.AdminOrJobCreaterRight, repo.TrainJobNewVersion) + m.Post("/create_version", cloudbrain.AdminOrJobCreaterRight, bindIgnErr(auth.CreateModelArtsTrainJobForm{}), repo.TrainJobCreateVersion) }) - m.Get("/create", reqRepoCloudBrainReader, repo.TrainJobNew) + m.Get("/create", reqRepoCloudBrainWriter, repo.TrainJobNew) m.Post("/create", reqRepoCloudBrainWriter, bindIgnErr(auth.CreateModelArtsTrainJobForm{}), repo.TrainJobCreate) m.Get("/para-config-list", reqRepoCloudBrainReader, repo.TrainJobGetConfigList) diff --git a/routers/user/profile.go b/routers/user/profile.go index fe5dc4598..9b99fa741 100755 --- a/routers/user/profile.go +++ b/routers/user/profile.go @@ -214,10 +214,15 @@ func Profile(ctx *context.Context) { total = int(count) case "datasets": + var isOwner = false + if ctx.User != nil && ctx.User.ID == ctxUser.ID { + isOwner = true + } datasetSearchOptions := &models.SearchDatasetOptions{ Keyword: keyword, OwnerID: ctxUser.ID, SearchOrderBy: orderBy, + IsOwner: isOwner, ListOptions: models.ListOptions{ Page: page, PageSize: setting.UI.ExplorePagingNum, diff --git a/services/mirror/mirror.go b/services/mirror/mirror.go old mode 100644 new mode 100755 index 165e7cd35..924574471 --- a/services/mirror/mirror.go +++ b/services/mirror/mirror.go @@ -252,7 +252,7 @@ func runSync(m *models.Mirror) ([]*mirrorSyncResult, bool) { } } - branches, err := repo_module.GetBranches(m.Repo) + branches, _, err := repo_module.GetBranches(m.Repo,0,0) if err != nil { log.Error("GetBranches: %v", err) return nil, false diff --git a/services/pull/pull.go b/services/pull/pull.go old mode 100644 new mode 100755 index fb4af0637..230a9e389 --- a/services/pull/pull.go +++ b/services/pull/pull.go @@ -452,7 +452,7 @@ func CloseBranchPulls(doer *models.User, repoID int64, branch string) error { // CloseRepoBranchesPulls close all pull requests which head branches are in the given repository func CloseRepoBranchesPulls(doer *models.User, repo *models.Repository) error { - branches, err := git.GetBranchesByPath(repo.RepoPath()) + branches, _, err := git.GetBranchesByPath(repo.RepoPath(), 0, 0) if err != nil { return err } diff --git a/templates/base/head_navbar.tmpl b/templates/base/head_navbar.tmpl index c0c1aff35..c87524afb 100755 --- a/templates/base/head_navbar.tmpl +++ b/templates/base/head_navbar.tmpl @@ -17,47 +17,63 @@ 
{{if .IsSigned}} - {{.i18n.Tr "index"}} - {{.i18n.Tr "custom.head.openi"}} - {{if not .UnitIssuesGlobalDisabled}} - {{.i18n.Tr "issues"}} - {{end}} - {{if not .UnitPullsGlobalDisabled}} - {{.i18n.Tr "pull_requests"}} - {{end}} - {{if not (and .UnitIssuesGlobalDisabled .UnitPullsGlobalDisabled)}} - {{if .ShowMilestonesDashboardPage}}{{.i18n.Tr "milestones"}}{{end}} - {{end}} - {{else if .IsLandingPageHome}} - {{.i18n.Tr "home"}} - {{.i18n.Tr "custom.head.openi"}} + -