diff --git a/README.md b/README.md
index 1d9ab8d06..7c954c2e4 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@

AiForge - OpenI AI Development Collaboration Platform

-[![release](https://img.shields.io/badge/release-1.21.11.1-blue)](https://git.openi.org.cn/OpenI/aiforge/releases/latest)
+[![release](https://img.shields.io/badge/release-1.21.11.1-blue)](https://openi.pcl.ac.cn/OpenI/aiforge/releases/latest)
 [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
@@ -10,7 +10,7 @@
 The OpenI AI Development Collaboration Platform is an online web application that provides a collaborative environment for developing AI algorithms and models. It offers code hosting, dataset management and sharing, free cloud compute (GPU/NPU), shared images, and more.
-[The OpenI AI Development Collaboration Platform](https://git.openi.org.cn) is an online service built from this project; follow the link to try it out.
+[The OpenI AI Development Collaboration Platform](https://openi.pcl.ac.cn) is an online service built from this project; follow the link to try it out.
 This project grew out of [Gitea](https://github.com/go-gitea/gitea): we forked it and extended it with the features AI development needs, such as dataset management and model training. For the code-hosting features, see the [Gitea documentation](https://docs.gitea.io/zh-cn/).
@@ -20,7 +20,7 @@
 The backend services cover the AI model development pipeline, including collaborative coding, data management, model debugging, training, inference, and deployment (*model deployment is not supported yet*). For each development stage we also provide tools such as data annotation, data filtering, model conversion, model compression, and code analysis, and we welcome the community to contribute more tools that make development on the platform more efficient.
 ![System architecture](assets/架构图.png)
 ## Using the Online Service
-Detailed help for the online service is available in the project [wiki](https://git.openi.org.cn/OpenI/aiforge/wiki):
+Detailed help for the online service is available in the project [wiki](https://openi.pcl.ac.cn/OpenI/aiforge/wiki):
 - How to create an account
 - How to create an organization and manage member permissions
 - How to create a project repository
@@ -39,22 +39,22 @@
 [Installing from source](https://docs.gitea.io/zh-cn/install-from-source/)
 ## License
-This project is released under the MIT license; the full text is in the [LICENSE](https://git.openi.org.cn/OpenI/aiforge/src/branch/develop/LICENSE) file.
+This project is released under the MIT license; the full text is in the [LICENSE](https://openi.pcl.ac.cn/OpenI/aiforge/src/branch/develop/LICENSE) file.
 ## Need Help?
 If you run into problems while using or developing the platform, you can ask through the following channels:
- - Submit an issue online [here](https://git.openi.org.cn/OpenI/aiforge/issues) (click the green **Create Issue** button at the top right of the page)
+ - Submit an issue online [here](https://openi.pcl.ac.cn/OpenI/aiforge/issues) (click the green **Create Issue** button at the top right of the page)
 - Join the WeChat group for real-time discussion and further support
-
+
 ## OpenI Community Beginner Training Camp:
-- Case-based walkthroughs of the community platform that help beginners without a technical background grow into OpenI community experts (https://git.openi.org.cn/zeizei/OpenI_Learning)
+- Case-based walkthroughs of the community platform that help beginners without a technical background grow into OpenI community experts (https://openi.pcl.ac.cn/zeizei/OpenI_Learning)
 ## Citing the Platform
 If the platform has helped your research, you can add the following to the acknowledgements of your paper:
-English: ```Thanks for the support provided by OpenI Community (https://git.openi.org.cn).```
+English: ```Thanks for the support provided by OpenI Community (https://openi.pcl.ac.cn).```
-Chinese: ```感谢启智社区提供的技术支持(https://git.openi.org.cn)。```
+Chinese: ```感谢启智社区提供的技术支持(https://openi.pcl.ac.cn)。```
 If your work cites the platform, you are also welcome to submit your results to the following open-source project:
-https://git.openi.org.cn/OpenIOSSG/references
+https://openi.pcl.ac.cn/OpenIOSSG/references
diff --git a/models/ai_model_manage.go b/models/ai_model_manage.go
index 0d754b0ba..5b14b9ba2 100644
--- a/models/ai_model_manage.go
+++ b/models/ai_model_manage.go
@@ -14,6 +14,7 @@ import (
 type AiModelManage struct {
 	ID           string `xorm:"pk" json:"id"`
 	Name         string `xorm:"INDEX NOT NULL" json:"name"`
+	ModelType    int    `xorm:"NULL" json:"modelType"`
 	Version      string `xorm:"NOT NULL" json:"version"`
 	VersionCount int    `xorm:"NOT NULL DEFAULT 0" json:"versionCount"`
 	New          int    `xorm:"NOT NULL" json:"new"`
@@ -287,6 +288,37 @@ func ModifyModelDescription(id string, description string) error {
 	return nil
 }
 
+func ModifyLocalModel(id string, name, label, description string, engine int) error {
+	sess := x.ID(id)
+	defer sess.Close()
+	re, err := sess.Cols("name", "label", "description", "engine").Update(&AiModelManage{
+		Description: description,
+		Name:        name,
+		Label:       label,
+		Engine:      int64(engine),
+	})
+	if err != nil {
+		return err
+	}
+	log.Info("success to update model in db. re=" + fmt.Sprint(re))
+	return nil
+}
+
+func ModifyModelSize(id string, size int64) error {
+	sess := x.ID(id)
+	defer sess.Close()
+	re, err :=
sess.Cols("size").Update(&AiModelManage{ + Size: size, + }) + if err != nil { + return err + } + log.Info("success to update size from db.re=" + fmt.Sprint((re))) + return nil +} + func ModifyModelStatus(id string, modelSize int64, status int, modelPath string, statusDesc string) error { var sess *xorm.Session sess = x.ID(id) diff --git a/models/attachment.go b/models/attachment.go index 2b5fa8efc..2b747db21 100755 --- a/models/attachment.go +++ b/models/attachment.go @@ -134,7 +134,8 @@ func (a *Attachment) S3DownloadURL() string { if a.Type == TypeCloudBrainOne { url, _ = storage.Attachments.PresignedGetURL(setting.Attachment.Minio.BasePath+AttachmentRelativePath(a.UUID), a.Name) } else if a.Type == TypeCloudBrainTwo { - url, _ = storage.ObsGetPreSignedUrl(a.UUID, a.Name) + objectName := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(a.UUID[0:1], a.UUID[1:2], a.UUID, a.Name)), "/") + url, _ = storage.ObsGetPreSignedUrl(objectName, a.Name) } return url @@ -550,7 +551,6 @@ func AttachmentsByDatasetOption(datasets []int64, opts *SearchDatasetOptions) ([ ) } - attachments := make([]*Attachment, 0) if err := sess.Table(&Attachment{}).Where(cond).Desc("id"). Find(&attachments); err != nil { diff --git a/models/cloudbrain.go b/models/cloudbrain.go index 6a084b682..ef2c699eb 100755 --- a/models/cloudbrain.go +++ b/models/cloudbrain.go @@ -291,6 +291,13 @@ func (task *Cloudbrain) IsRunning() bool { status == string(JobRunning) || status == GrampusStatusRunning } +func (task *Cloudbrain) IsUserHasRight(user *User) bool { + if user == nil { + return false + } + return user.IsAdmin || user.ID == task.UserID +} + func ConvertDurationToStr(duration int64) string { if duration <= 0 { return DURATION_STR_ZERO diff --git a/models/file_chunk.go b/models/file_chunk.go index 0fc3a8879..cad7746b7 100755 --- a/models/file_chunk.go +++ b/models/file_chunk.go @@ -28,6 +28,23 @@ type FileChunk struct { UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` } +type ModelFileChunk struct { + ID int64 `xorm:"pk autoincr"` + UUID string `xorm:"INDEX"` + Md5 string `xorm:"INDEX"` + ModelUUID string `xorm:"INDEX"` + ObjectName string `xorm:"DEFAULT ''"` + IsUploaded int `xorm:"DEFAULT 0"` // not uploaded: 0, uploaded: 1 + UploadID string `xorm:"UNIQUE"` //minio upload id + TotalChunks int + Size int64 + UserID int64 `xorm:"INDEX"` + Type int `xorm:"INDEX DEFAULT 0"` + CompletedParts []string `xorm:"DEFAULT ''"` // chunkNumber+etag eg: ,1-asqwewqe21312312.2-123hjkas + CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` + UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` +} + // GetFileChunkByMD5 returns fileChunk by given id func GetFileChunkByMD5(md5 string) (*FileChunk, error) { return getFileChunkByMD5(x, md5) @@ -49,6 +66,21 @@ func GetFileChunkByMD5AndUser(md5 string, userID int64, typeCloudBrain int) (*Fi return getFileChunkByMD5AndUser(x, md5, userID, typeCloudBrain) } +func GetModelFileChunkByMD5AndUser(md5 string, userID int64, typeCloudBrain int, uuid string) (*ModelFileChunk, error) { + return getModelFileChunkByMD5AndUser(x, md5, userID, typeCloudBrain, uuid) +} + +func getModelFileChunkByMD5AndUser(e Engine, md5 string, userID int64, typeCloudBrain int, uuid string) (*ModelFileChunk, error) { + fileChunk := new(ModelFileChunk) + + if has, err := e.Where("md5 = ? and user_id = ? and type = ? 
and model_uuid= ?", md5, userID, typeCloudBrain, uuid).Get(fileChunk); err != nil { + return nil, err + } else if !has { + return nil, ErrFileChunkNotExist{md5, ""} + } + return fileChunk, nil +} + func getFileChunkByMD5AndUser(e Engine, md5 string, userID int64, typeCloudBrain int) (*FileChunk, error) { fileChunk := new(FileChunk) @@ -76,6 +108,21 @@ func getFileChunkByUUID(e Engine, uuid string) (*FileChunk, error) { return fileChunk, nil } +func GetModelFileChunkByUUID(uuid string) (*ModelFileChunk, error) { + return getModelFileChunkByUUID(x, uuid) +} + +func getModelFileChunkByUUID(e Engine, uuid string) (*ModelFileChunk, error) { + fileChunk := new(ModelFileChunk) + + if has, err := e.Where("uuid = ?", uuid).Get(fileChunk); err != nil { + return nil, err + } else if !has { + return nil, ErrFileChunkNotExist{"", uuid} + } + return fileChunk, nil +} + // InsertFileChunk insert a record into file_chunk. func InsertFileChunk(fileChunk *FileChunk) (_ *FileChunk, err error) { if _, err := x.Insert(fileChunk); err != nil { @@ -85,6 +132,14 @@ func InsertFileChunk(fileChunk *FileChunk) (_ *FileChunk, err error) { return fileChunk, nil } +// InsertFileChunk insert a record into file_chunk. +func InsertModelFileChunk(fileChunk *ModelFileChunk) (_ *ModelFileChunk, err error) { + if _, err := x.Insert(fileChunk); err != nil { + return nil, err + } + return fileChunk, nil +} + func DeleteFileChunkById(uuid string) (*FileChunk, error) { return deleteFileChunkById(x, uuid) } @@ -106,6 +161,17 @@ func deleteFileChunkById(e Engine, uuid string) (*FileChunk, error) { } } +func UpdateModelFileChunk(fileChunk *ModelFileChunk) error { + return updateModelFileChunk(x, fileChunk) +} + +func updateModelFileChunk(e Engine, fileChunk *ModelFileChunk) error { + var sess *xorm.Session + sess = e.Where("uuid = ?", fileChunk.UUID) + _, err := sess.Cols("is_uploaded").Update(fileChunk) + return err +} + // UpdateFileChunk updates the given file_chunk in database func UpdateFileChunk(fileChunk *FileChunk) error { return updateFileChunk(x, fileChunk) @@ -127,3 +193,12 @@ func deleteFileChunk(e Engine, fileChunk *FileChunk) error { _, err := e.ID(fileChunk.ID).Delete(fileChunk) return err } + +func DeleteModelFileChunk(fileChunk *ModelFileChunk) error { + return deleteModelFileChunk(x, fileChunk) +} + +func deleteModelFileChunk(e Engine, fileChunk *ModelFileChunk) error { + _, err := e.ID(fileChunk.ID).Delete(fileChunk) + return err +} diff --git a/models/models.go b/models/models.go index a4ec43f43..6427c576c 100755 --- a/models/models.go +++ b/models/models.go @@ -136,6 +136,7 @@ func init() { new(ImageTopic), new(ImageTopicRelation), new(FileChunk), + new(ModelFileChunk), new(BlockChain), new(RecommendOrg), new(AiModelManage), @@ -185,6 +186,7 @@ func init() { new(UserAnalysisPara), new(Invitation), new(CloudbrainDurationStatistic), + new(UserSummaryCurrentYear), ) gonicNames := []string{"SSL", "UID"} diff --git a/models/resource_specification.go b/models/resource_specification.go index a6f5f1f82..7a11edd05 100644 --- a/models/resource_specification.go +++ b/models/resource_specification.go @@ -12,6 +12,13 @@ const ( SpecOffShelf ) +type SearchSpecOrderBy int + +const ( + SearchSpecOrderById SearchSpecOrderBy = iota + SearchSpecOrder4Standard +) + type ResourceSpecification struct { ID int64 `xorm:"pk autoincr"` QueueId int64 `xorm:"INDEX"` @@ -85,6 +92,7 @@ type SearchResourceSpecificationOptions struct { Status int Cluster string AvailableCode int + OrderBy SearchSpecOrderBy } type 
SearchResourceBriefSpecificationOptions struct { @@ -233,10 +241,18 @@ func SearchResourceSpecification(opts SearchResourceSpecificationOptions) (int64 return 0, nil, err } + var orderby = "" + switch opts.OrderBy { + case SearchSpecOrder4Standard: + orderby = "resource_queue.compute_resource asc,resource_queue.acc_card_type asc,resource_specification.acc_cards_num asc,resource_specification.cpu_cores asc,resource_specification.mem_gi_b asc,resource_specification.share_mem_gi_b asc" + default: + orderby = "resource_specification.id desc" + } + r := make([]ResourceSpecAndQueue, 0) err = x.Where(cond). Join("INNER", "resource_queue", "resource_queue.ID = resource_specification.queue_id"). - Desc("resource_specification.id"). + OrderBy(orderby). Limit(opts.PageSize, (opts.Page-1)*opts.PageSize). Unscoped().Find(&r) if err != nil { diff --git a/models/user_business_analysis.go b/models/user_business_analysis.go index e99927e18..394c24825 100644 --- a/models/user_business_analysis.go +++ b/models/user_business_analysis.go @@ -3,12 +3,15 @@ package models import ( "encoding/json" "fmt" + "io/ioutil" + "net/http" "sort" "strconv" "strings" "time" "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/timeutil" "xorm.io/builder" "xorm.io/xorm" @@ -19,185 +22,6 @@ const ( BATCH_INSERT_SIZE = 50 ) -type UserBusinessAnalysisAll struct { - ID int64 `xorm:"pk"` - - CountDate int64 `xorm:"pk"` - - //action :ActionMergePullRequest // 11 - CodeMergeCount int `xorm:"NOT NULL DEFAULT 0"` - - //action :ActionCommitRepo // 5 - CommitCount int `xorm:"NOT NULL DEFAULT 0"` - - //action :ActionCreateIssue // 10 - IssueCount int `xorm:"NOT NULL DEFAULT 0"` - - //comment table current date - CommentCount int `xorm:"NOT NULL DEFAULT 0"` - - //watch table current date - FocusRepoCount int `xorm:"NOT NULL DEFAULT 0"` - - //star table current date - StarRepoCount int `xorm:"NOT NULL DEFAULT 0"` - - //follow table - WatchedCount int `xorm:"NOT NULL DEFAULT 0"` - - // user table - GiteaAgeMonth int `xorm:"NOT NULL DEFAULT 0"` - - // - CommitCodeSize int `xorm:"NOT NULL DEFAULT 0"` - - //attachement table - CommitDatasetSize int `xorm:"NOT NULL DEFAULT 0"` - - //0 - CommitModelCount int `xorm:"NOT NULL DEFAULT 0"` - - //issue, issueassignees - SolveIssueCount int `xorm:"NOT NULL DEFAULT 0"` - - //baike - EncyclopediasCount int `xorm:"NOT NULL DEFAULT 0"` - - //user - RegistDate timeutil.TimeStamp `xorm:"NOT NULL"` - - //repo - CreateRepoCount int `xorm:"NOT NULL DEFAULT 0"` - - //login count, from elk - LoginCount int `xorm:"NOT NULL DEFAULT 0"` - - //openi index - OpenIIndex float64 `xorm:"NOT NULL DEFAULT 0"` - - //user - Email string `xorm:"NOT NULL"` - - //user - Name string `xorm:"NOT NULL"` - - DataDate string `xorm:"NULL"` - - //cloudbraintask - CloudBrainTaskNum int `xorm:"NOT NULL DEFAULT 0"` - GpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` - NpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` - GpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` - NpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` - NpuInferenceJob int `xorm:"NOT NULL DEFAULT 0"` - GpuBenchMarkJob int `xorm:"NOT NULL DEFAULT 0"` - CloudBrainRunTime int `xorm:"NOT NULL DEFAULT 0"` - CommitDatasetNum int `xorm:"NOT NULL DEFAULT 0"` - UserIndex float64 `xorm:"NOT NULL DEFAULT 0"` - UserIndexPrimitive float64 `xorm:"NOT NULL DEFAULT 0"` - - UserLocation string `xorm:"NULL"` - - FocusOtherUser int `xorm:"NOT NULL DEFAULT 0"` - CollectDataset int `xorm:"NOT NULL DEFAULT 0"` - CollectedDataset int `xorm:"NOT NULL DEFAULT 0"` - 
RecommendDataset int `xorm:"NOT NULL DEFAULT 0"` - CollectImage int `xorm:"NOT NULL DEFAULT 0"` - CollectedImage int `xorm:"NOT NULL DEFAULT 0"` - RecommendImage int `xorm:"NOT NULL DEFAULT 0"` - - Phone string `xorm:"NULL"` - InvitationUserNum int `xorm:"NOT NULL DEFAULT 0"` -} - -type UserBusinessAnalysis struct { - ID int64 `xorm:"pk"` - DataDate string `xorm:"pk"` - CountDate int64 `xorm:"NULL"` - - //action :ActionMergePullRequest // 11 - CodeMergeCount int `xorm:"NOT NULL DEFAULT 0"` - - //action :ActionCommitRepo // 5 - CommitCount int `xorm:"NOT NULL DEFAULT 0"` - - //action :ActionCreateIssue // 6 - IssueCount int `xorm:"NOT NULL DEFAULT 0"` - - //comment table current date - CommentCount int `xorm:"NOT NULL DEFAULT 0"` - - //watch table current date - FocusRepoCount int `xorm:"NOT NULL DEFAULT 0"` - - //star table current date - StarRepoCount int `xorm:"NOT NULL DEFAULT 0"` - - //follow table - WatchedCount int `xorm:"NOT NULL DEFAULT 0"` - - // user table - GiteaAgeMonth int `xorm:"NOT NULL DEFAULT 0"` - - // - CommitCodeSize int `xorm:"NOT NULL DEFAULT 0"` - - //attachement table - CommitDatasetSize int `xorm:"NOT NULL DEFAULT 0"` - - //0 - CommitModelCount int `xorm:"NOT NULL DEFAULT 0"` - - //issue, issueassignees - SolveIssueCount int `xorm:"NOT NULL DEFAULT 0"` - - //baike - EncyclopediasCount int `xorm:"NOT NULL DEFAULT 0"` - - //user - RegistDate timeutil.TimeStamp `xorm:"NOT NULL"` - - //repo - CreateRepoCount int `xorm:"NOT NULL DEFAULT 0"` - - //login count, from elk - LoginCount int `xorm:"NOT NULL DEFAULT 0"` - - //openi index - OpenIIndex float64 `xorm:"NOT NULL DEFAULT 0"` - - //user - Email string `xorm:"NOT NULL"` - - //user - Name string `xorm:"NOT NULL"` - - CloudBrainTaskNum int `xorm:"NOT NULL DEFAULT 0"` - GpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` - NpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` - GpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` - NpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` - NpuInferenceJob int `xorm:"NOT NULL DEFAULT 0"` - GpuBenchMarkJob int `xorm:"NOT NULL DEFAULT 0"` - CloudBrainRunTime int `xorm:"NOT NULL DEFAULT 0"` - CommitDatasetNum int `xorm:"NOT NULL DEFAULT 0"` - UserIndex float64 `xorm:"NOT NULL DEFAULT 0"` - UserIndexPrimitive float64 `xorm:"NOT NULL DEFAULT 0"` - - UserLocation string `xorm:"NULL"` - - FocusOtherUser int `xorm:"NOT NULL DEFAULT 0"` - CollectDataset int `xorm:"NOT NULL DEFAULT 0"` - CollectedDataset int `xorm:"NOT NULL DEFAULT 0"` - RecommendDataset int `xorm:"NOT NULL DEFAULT 0"` - CollectImage int `xorm:"NOT NULL DEFAULT 0"` - CollectedImage int `xorm:"NOT NULL DEFAULT 0"` - RecommendImage int `xorm:"NOT NULL DEFAULT 0"` - - Phone string `xorm:"NULL"` - InvitationUserNum int `xorm:"NOT NULL DEFAULT 0"` -} - type UserBusinessAnalysisQueryOptions struct { ListOptions UserName string @@ -499,7 +323,7 @@ func QueryUserStaticDataForUserDefine(opts *UserBusinessAnalysisQueryOptions, wi DataDate := currentTimeNow.Format("2006-01-02 15:04") CodeMergeCountMap := queryPullRequest(start_unix, end_unix) - CommitCountMap := queryCommitAction(start_unix, end_unix, 5) + CommitCountMap, _ := queryCommitAction(start_unix, end_unix, 5) IssueCountMap := queryCreateIssue(start_unix, end_unix) CommentCountMap := queryComment(start_unix, end_unix) @@ -517,16 +341,16 @@ func QueryUserStaticDataForUserDefine(opts *UserBusinessAnalysisQueryOptions, wi CommitCodeSizeMapJson, _ := json.Marshal(CommitCodeSizeMap) log.Info("CommitCodeSizeMapJson=" + string(CommitCodeSizeMapJson)) } - CommitDatasetSizeMap, CommitDatasetNumMap := 
queryDatasetSize(start_unix, end_unix) + CommitDatasetSizeMap, CommitDatasetNumMap, _ := queryDatasetSize(start_unix, end_unix) SolveIssueCountMap := querySolveIssue(start_unix, end_unix) - CreateRepoCountMap := queryUserCreateRepo(start_unix, end_unix) + CreateRepoCountMap, _, _ := queryUserCreateRepo(start_unix, end_unix) LoginCountMap := queryLoginCount(start_unix, end_unix) OpenIIndexMap := queryUserRepoOpenIIndex(start_unix, end_unix) CloudBrainTaskMap, CloudBrainTaskItemMap := queryCloudBrainTask(start_unix, end_unix) AiModelManageMap := queryUserModel(start_unix, end_unix) CollectDataset, CollectedDataset := queryDatasetStars(start_unix, end_unix) - RecommendDataset := queryRecommedDataSet(start_unix, end_unix) + RecommendDataset, _ := queryRecommedDataSet(start_unix, end_unix) CollectImage, CollectedImage := queryImageStars(start_unix, end_unix) RecommendImage := queryRecommedImage(start_unix, end_unix) @@ -752,7 +576,7 @@ func refreshUserStaticTable(wikiCountMap map[string]int, tableName string, pageS startTime := currentTimeNow.AddDate(0, 0, -1) CodeMergeCountMap := queryPullRequest(start_unix, end_unix) - CommitCountMap := queryCommitAction(start_unix, end_unix, 5) + CommitCountMap, mostActiveMap := queryCommitAction(start_unix, end_unix, 5) IssueCountMap := queryCreateIssue(start_unix, end_unix) CommentCountMap := queryComment(start_unix, end_unix) @@ -764,13 +588,13 @@ func refreshUserStaticTable(wikiCountMap map[string]int, tableName string, pageS log.Info("query commit code errr.") } else { log.Info("query commit code size, len=" + fmt.Sprint(len(CommitCodeSizeMap))) - CommitCodeSizeMapJson, _ := json.Marshal(CommitCodeSizeMap) - log.Info("CommitCodeSizeMapJson=" + string(CommitCodeSizeMapJson)) + //CommitCodeSizeMapJson, _ := json.Marshal(CommitCodeSizeMap) + //log.Info("CommitCodeSizeMapJson=" + string(CommitCodeSizeMapJson)) } //CommitCodeSizeMap := queryCommitCodeSize(StartTimeNextDay.Unix(), EndTimeNextDay.Unix()) - CommitDatasetSizeMap, CommitDatasetNumMap := queryDatasetSize(start_unix, end_unix) + CommitDatasetSizeMap, CommitDatasetNumMap, dataSetDownloadMap := queryDatasetSize(start_unix, end_unix) SolveIssueCountMap := querySolveIssue(start_unix, end_unix) - CreateRepoCountMap := queryUserCreateRepo(start_unix, end_unix) + CreateRepoCountMap, DetailInfoMap, MostDownloadMap := queryUserCreateRepo(start_unix, end_unix) LoginCountMap := queryLoginCount(start_unix, end_unix) OpenIIndexMap := queryUserRepoOpenIIndex(startTime.Unix(), end_unix) @@ -778,14 +602,19 @@ func refreshUserStaticTable(wikiCountMap map[string]int, tableName string, pageS AiModelManageMap := queryUserModel(start_unix, end_unix) CollectDataset, CollectedDataset := queryDatasetStars(start_unix, end_unix) - RecommendDataset := queryRecommedDataSet(start_unix, end_unix) + RecommendDataset, CreatedDataset := queryRecommedDataSet(start_unix, end_unix) CollectImage, CollectedImage := queryImageStars(start_unix, end_unix) RecommendImage := queryRecommedImage(start_unix, end_unix) InvitationMap := queryUserInvitationCount(start_unix, end_unix) DataDate := currentTimeNow.Format("2006-01-02") + " 00:01" - + bonusMap := make(map[string]map[string]int) + if tableName == "user_business_analysis_current_year" { + bonusMap = getBonusMap() + log.Info("truncate all data from table:user_summary_current_year ") + statictisSess.Exec("TRUNCATE TABLE user_summary_current_year") + } cond := "type != 1 and is_active=true" count, err := sess.Where(cond).Count(new(User)) if err != nil { @@ -883,6 +712,37 @@ func 
refreshUserStaticTable(wikiCountMap map[string]int, tableName string, pageS
 				userMetrics["TotalHasActivityUser"] = getMapKeyStringValue("TotalHasActivityUser", userMetrics) + 1
 			}
 		}
+		if tableName == "user_business_analysis_current_year" {
+			//annual summary data
+			subTime := time.Now().UTC().Sub(dateRecordAll.RegistDate.AsTime().UTC())
+			mostActiveDay := ""
+			if userInfo, ok := mostActiveMap[dateRecordAll.ID]; ok {
+				mostActiveDay = getMostActiveJson(userInfo)
+			}
+			scoreMap := make(map[string]float64)
+			repoInfo := getRepoDetailInfo(DetailInfoMap, dateRecordAll.ID, MostDownloadMap)
+			dataSetInfo, datasetscore := getDataSetInfo(dateRecordAll.ID, CreatedDataset, dataSetDownloadMap, CommitDatasetNumMap, CollectedDataset)
+			scoreMap["datasetscore"] = datasetscore
+			codeInfo, codescore := getCodeInfo(dateRecordAll)
+			scoreMap["codescore"] = codescore
+			cloudBrainInfo := getCloudBrainInfo(dateRecordAll, CloudBrainTaskItemMap, scoreMap)
+			playARoll := getPlayARoll(bonusMap, dateRecordAll.Name, scoreMap)
+			re := &UserSummaryCurrentYear{
+				ID:             dateRecordAll.ID,
+				Name:           dateRecordAll.Name,
+				Email:          dateRecordAll.Email,
+				Phone:          dateRecordAll.Phone,
+				RegistDate:     dateRecordAll.RegistDate,
+				DateCount:      int(subTime.Hours()) / 24,
+				MostActiveDay:  mostActiveDay,
+				RepoInfo:       repoInfo,
+				DataSetInfo:    dataSetInfo,
+				CodeInfo:       codeInfo,
+				CloudBrainInfo: cloudBrainInfo,
+				PlayARoll:      playARoll,
+			}
+			statictisSess.Insert(re)
+		}
 	}
 	if len(dateRecordBatch) > 0 {
 		err := insertTable(dateRecordBatch, tableName, statictisSess)
@@ -890,6 +750,7 @@ func refreshUserStaticTable(wikiCountMap map[string]int, tableName string, pageS
 		if err != nil {
 			log.Info("insert all data failed." + err.Error())
 		}
+
 	}
 	indexTotal += PAGE_SIZE
 	if indexTotal >= count {
@@ -911,6 +772,204 @@ func refreshUserStaticTable(wikiCountMap map[string]int, tableName string, pageS
 	log.Info("refresh data finished.tableName=" + tableName + " total record:" + fmt.Sprint(insertCount))
 }
 
+func getBonusMap() map[string]map[string]int {
+	bonusMap := make(map[string]map[string]int)
+	url := setting.RecommentRepoAddr + "bonus/record.txt"
+	content, err := GetContentFromPromote(url)
+	if err == nil {
+		filenames := strings.Split(content, "\n")
+		for i := 0; i < len(filenames); i++ {
+			url = setting.RecommentRepoAddr + "bonus/" + filenames[i]
+			csvContent, err1 := GetContentFromPromote(url)
+			if err1 == nil {
+				//read csv
+				lines := strings.Split(csvContent, "\n")
+				for j := 1; j < len(lines); j++ {
+					aLine := strings.Split(lines[j], ",")
+					if len(aLine) < 7 {
+						continue
+					}
+					userName := aLine[1]
+					//email := aLine[2]
+					record, ok := bonusMap[userName]
+					if !ok {
+						record = make(map[string]int)
+						bonusMap[userName] = record
+					}
+					record["times"] = getMapKeyStringValue("times", record) + getIntValue(aLine[3])
+					record["total_bonus"] = getMapKeyStringValue("total_bonus", record) + getIntValue(aLine[4])
+					record["total_cardtime"] = getMapKeyStringValue("total_cardtime", record) + getIntValue(aLine[5])
+					record["total_giveup"] = getMapKeyStringValue("total_giveup", record) + getIntValue(aLine[6])
+				}
+			}
+		}
+	}
+	return bonusMap
+}
+
+func getIntValue(val string) int {
+	i, err := strconv.Atoi(val)
+	if err == nil {
+		return i
+	}
+	return 0
+}
+
+func getPlayARoll(bonusMap map[string]map[string]int, userName string, scoreMap map[string]float64) string {
+	bonusInfo := make(map[string]string)
+	record, ok := bonusMap[userName]
+	if ok {
+		rollscore := 0.0
+		bonusInfo["times"] = fmt.Sprint(record["times"])
+		if record["times"] >= 4 {
+			rollscore = float64(record["times"]) / float64(4)
+		}
+
scoreMap["rollscore"] = rollscore + bonusInfo["total_bonus"] = fmt.Sprint(record["total_bonus"]) + bonusInfo["total_cardtime"] = fmt.Sprint(record["total_cardtime"]) + bonusInfo["total_giveup"] = fmt.Sprint(record["total_giveup"]) + bonusInfoJson, _ := json.Marshal(bonusInfo) + return string(bonusInfoJson) + } else { + return "" + } +} + +func getCloudBrainInfo(dateRecordAll UserBusinessAnalysisAll, CloudBrainTaskItemMap map[string]int, scoreMap map[string]float64) string { + trainscore := 0.0 + debugscore := 0.0 + runtime := 0.0 + if dateRecordAll.CloudBrainTaskNum > 0 { + cloudBrainInfo := make(map[string]string) + cloudBrainInfo["create_task_num"] = fmt.Sprint(dateRecordAll.CloudBrainTaskNum) + cloudBrainInfo["debug_task_num"] = fmt.Sprint(dateRecordAll.GpuDebugJob + dateRecordAll.NpuDebugJob) + if dateRecordAll.GpuDebugJob+dateRecordAll.NpuDebugJob >= 50 { + debugscore = float64(dateRecordAll.GpuDebugJob+dateRecordAll.NpuDebugJob) / float64(50) + } + cloudBrainInfo["train_task_num"] = fmt.Sprint(dateRecordAll.GpuTrainJob + dateRecordAll.NpuTrainJob) + if dateRecordAll.GpuTrainJob+dateRecordAll.NpuTrainJob >= 50 { + trainscore = float64(dateRecordAll.GpuTrainJob+dateRecordAll.NpuTrainJob) / float64(50) + } + cloudBrainInfo["inference_task_num"] = fmt.Sprint(dateRecordAll.NpuInferenceJob + CloudBrainTaskItemMap[fmt.Sprint(dateRecordAll.ID)+"_GpuInferenceJob"]) + cloudBrainInfo["card_runtime"] = fmt.Sprint(dateRecordAll.CloudBrainRunTime) + if dateRecordAll.CloudBrainRunTime >= 100 { + runtime = float64(dateRecordAll.CloudBrainRunTime) / float64(100) + } + cloudBrainInfo["card_runtime_money"] = fmt.Sprint(dateRecordAll.CloudBrainRunTime * 5) + cloudBrainInfo["CloudBrainOne"] = fmt.Sprint(CloudBrainTaskItemMap[fmt.Sprint(dateRecordAll.ID)+"_CloudBrainOne"]) + cloudBrainInfo["CloudBrainTwo"] = fmt.Sprint(CloudBrainTaskItemMap[fmt.Sprint(dateRecordAll.ID)+"_CloudBrainTwo"]) + cloudBrainInfo["C2Net"] = fmt.Sprint(CloudBrainTaskItemMap[fmt.Sprint(dateRecordAll.ID)+"_C2Net"]) + + cloudBrainInfoJson, _ := json.Marshal(cloudBrainInfo) + scoreMap["trainscore"] = trainscore + scoreMap["debugscore"] = debugscore + scoreMap["runtime"] = runtime + return string(cloudBrainInfoJson) + } else { + scoreMap["trainscore"] = trainscore + scoreMap["debugscore"] = debugscore + scoreMap["runtime"] = runtime + return "" + } +} + +func getCodeInfo(dateRecordAll UserBusinessAnalysisAll) (string, float64) { + if dateRecordAll.CommitCount > 0 { + codeInfo := make(map[string]string) + codeInfo["commit_count"] = fmt.Sprint(dateRecordAll.CommitCount) + codeInfo["commit_line"] = fmt.Sprint(dateRecordAll.CommitCodeSize) + score := 0.0 + score = float64(dateRecordAll.CommitCodeSize) / float64(dateRecordAll.CommitCount) / float64(20000) + if score < (float64(dateRecordAll.CommitCount) / float64(100)) { + score = float64(dateRecordAll.CommitCount) / float64(100) + } + codeInfo["score"] = fmt.Sprintf("%.2f", score) + + codeInfoJson, _ := json.Marshal(codeInfo) + return string(codeInfoJson), score + } else { + return "", 0 + } +} + +func getDataSetInfo(userId int64, CreatedDataset map[int64]int, dataSetDownloadMap map[int64]int, CommitDatasetNumMap map[int64]int, CollectedDataset map[int64]int) (string, float64) { + datasetInfo := make(map[string]string) + score := 0.0 + if create_count, ok := CreatedDataset[userId]; ok { + datasetInfo["create_count"] = fmt.Sprint(create_count) + score = float64(create_count) / 10 + } + if upload_count, ok := CommitDatasetNumMap[userId]; ok { + datasetInfo["upload_file_count"] = 
fmt.Sprint(upload_count) + } + if download_count, ok := dataSetDownloadMap[userId]; ok { + datasetInfo["download_count"] = fmt.Sprint(download_count) + } + if cllected_count, ok := CollectedDataset[userId]; ok { + datasetInfo["cllected_count"] = fmt.Sprint(cllected_count) + } + + if len(datasetInfo) > 0 { + datasetInfoJson, _ := json.Marshal(datasetInfo) + return string(datasetInfoJson), score + } else { + return "", score + } +} + +func getRepoDetailInfo(repoDetailInfoMap map[string]int, userId int64, mostDownload map[int64]string) string { + repoDetailInfo := make(map[string]string) + if total, ok := repoDetailInfoMap[fmt.Sprint(userId)+"_total"]; ok { + repoDetailInfo["repo_total"] = fmt.Sprint(total) + } + if private, ok := repoDetailInfoMap[fmt.Sprint(userId)+"_is_private"]; ok { + repoDetailInfo["repo_is_private"] = fmt.Sprint(private) + } + if public, ok := repoDetailInfoMap[fmt.Sprint(userId)+"_is_public"]; ok { + repoDetailInfo["repo_is_public"] = fmt.Sprint(public) + } + if download, ok := repoDetailInfoMap[fmt.Sprint(userId)+"_total_download"]; ok { + repoDetailInfo["repo_total_download"] = fmt.Sprint(download) + } + if mostdownload, ok := repoDetailInfoMap[fmt.Sprint(userId)+"_most_download"]; ok { + repoDetailInfo["repo_most_download_count"] = fmt.Sprint(mostdownload) + } + if mostdownloadName, ok := mostDownload[userId]; ok { + repoDetailInfo["repo_most_download_name"] = mostdownloadName + } + if len(repoDetailInfo) > 0 { + repoDetailInfoJson, _ := json.Marshal(repoDetailInfo) + return string(repoDetailInfoJson) + } else { + return "" + } +} + +func getMostActiveJson(userInfo map[string]int) string { + mostActiveMap := make(map[string]string) + if day, ok := userInfo["hour_day"]; ok { + hour := userInfo["hour_hour"] + month := userInfo["hour_month"] + year := userInfo["hour_year"] + delete(userInfo, "hour_day") + delete(userInfo, "hour_hour") + delete(userInfo, "hour_month") + delete(userInfo, "hour_year") + mostActiveMap["before_dawn"] = fmt.Sprint(year) + "/" + fmt.Sprint(month) + "/" + fmt.Sprint(day) + " " + fmt.Sprint(hour) + } + max := 0 + max_day := "" + for key, value := range userInfo { + if value > max { + max = value + max_day = key + } + } + mostActiveMap["most_active_day"] = max_day + mostActiveMap["most_active_num"] = fmt.Sprint(max) + mostActiveMapJson, _ := json.Marshal(mostActiveMap) + return string(mostActiveMapJson) +} + func updateUserIndex(tableName string, statictisSess *xorm.Session, userId int64, userIndex float64) { updateSql := "UPDATE public." 
+ tableName + " set user_index=" + fmt.Sprint(userIndex*100) + " where id=" + fmt.Sprint(userId) statictisSess.Exec(updateSql) @@ -997,7 +1056,7 @@ func CounDataByDateAndReCount(wikiCountMap map[string]int, startTime time.Time, DataDate := CountDate.Format("2006-01-02") CodeMergeCountMap := queryPullRequest(start_unix, end_unix) - CommitCountMap := queryCommitAction(start_unix, end_unix, 5) + CommitCountMap, _ := queryCommitAction(start_unix, end_unix, 5) IssueCountMap := queryCreateIssue(start_unix, end_unix) CommentCountMap := queryComment(start_unix, end_unix) @@ -1010,19 +1069,19 @@ func CounDataByDateAndReCount(wikiCountMap map[string]int, startTime time.Time, log.Info("query commit code errr.") } else { //log.Info("query commit code size, len=" + fmt.Sprint(len(CommitCodeSizeMap))) - CommitCodeSizeMapJson, _ := json.Marshal(CommitCodeSizeMap) - log.Info("CommitCodeSizeMapJson=" + string(CommitCodeSizeMapJson)) + //CommitCodeSizeMapJson, _ := json.Marshal(CommitCodeSizeMap) + //log.Info("CommitCodeSizeMapJson=" + string(CommitCodeSizeMapJson)) } - CommitDatasetSizeMap, CommitDatasetNumMap := queryDatasetSize(start_unix, end_unix) + CommitDatasetSizeMap, CommitDatasetNumMap, _ := queryDatasetSize(start_unix, end_unix) SolveIssueCountMap := querySolveIssue(start_unix, end_unix) - CreateRepoCountMap := queryUserCreateRepo(start_unix, end_unix) + CreateRepoCountMap, _, _ := queryUserCreateRepo(start_unix, end_unix) LoginCountMap := queryLoginCount(start_unix, end_unix) OpenIIndexMap := queryUserRepoOpenIIndex(start_unix, end_unix) CloudBrainTaskMap, CloudBrainTaskItemMap := queryCloudBrainTask(start_unix, end_unix) AiModelManageMap := queryUserModel(start_unix, end_unix) CollectDataset, CollectedDataset := queryDatasetStars(start_unix, end_unix) - RecommendDataset := queryRecommedDataSet(start_unix, end_unix) + RecommendDataset, _ := queryRecommedDataSet(start_unix, end_unix) CollectImage, CollectedImage := queryImageStars(start_unix, end_unix) RecommendImage := queryRecommedImage(start_unix, end_unix) @@ -1490,41 +1549,65 @@ func queryPullRequest(start_unix int64, end_unix int64) map[int64]int { return resultMap } -func queryCommitAction(start_unix int64, end_unix int64, actionType int64) map[int64]int { +func queryCommitAction(start_unix int64, end_unix int64, actionType int64) (map[int64]int, map[int64]map[string]int) { sess := x.NewSession() defer sess.Close() resultMap := make(map[int64]int) - - cond := "user_id=act_user_id and op_type=" + fmt.Sprint(actionType) + " and created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix) + mostActiveMap := make(map[int64]map[string]int) + cond := "user_id=act_user_id and created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix) count, err := sess.Where(cond).Count(new(Action)) if err != nil { log.Info("query action error. 
return.") - return resultMap + return resultMap, mostActiveMap } + var indexTotal int64 indexTotal = 0 for { - sess.Select("id,user_id,op_type,act_user_id").Table("action").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) + sess.Select("id,user_id,op_type,act_user_id,created_unix").Table("action").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) actionList := make([]*Action, 0) sess.Find(&actionList) log.Info("query action size=" + fmt.Sprint(len(actionList))) for _, actionRecord := range actionList { - if _, ok := resultMap[actionRecord.UserID]; !ok { - resultMap[actionRecord.UserID] = 1 + if int64(actionRecord.OpType) == actionType { + if _, ok := resultMap[actionRecord.UserID]; !ok { + resultMap[actionRecord.UserID] = 1 + } else { + resultMap[actionRecord.UserID] += 1 + } + } + key := getDate(actionRecord.CreatedUnix) + if _, ok := mostActiveMap[actionRecord.UserID]; !ok { + tmpMap := make(map[string]int) + tmpMap[key] = 1 + mostActiveMap[actionRecord.UserID] = tmpMap } else { - resultMap[actionRecord.UserID] += 1 + mostActiveMap[actionRecord.UserID][key] = getMapKeyStringValue(key, mostActiveMap[actionRecord.UserID]) + 1 + } + utcTime := actionRecord.CreatedUnix.AsTime() + hour := utcTime.Hour() + if hour >= 0 && hour <= 5 { + key = "hour_hour" + if getMapKeyStringValue(key, mostActiveMap[actionRecord.UserID]) < hour { + mostActiveMap[actionRecord.UserID][key] = hour + mostActiveMap[actionRecord.UserID]["hour_day"] = utcTime.Day() + mostActiveMap[actionRecord.UserID]["hour_month"] = int(utcTime.Month()) + mostActiveMap[actionRecord.UserID]["hour_year"] = utcTime.Year() + } } } - indexTotal += PAGE_SIZE if indexTotal >= count { break } } - return resultMap + return resultMap, mostActiveMap +} +func getDate(createTime timeutil.TimeStamp) string { + return createTime.Format("2006-01-02") } func queryCreateIssue(start_unix int64, end_unix int64) map[int64]int { @@ -1714,15 +1797,16 @@ func queryFollow(start_unix int64, end_unix int64) (map[int64]int, map[int64]int return resultMap, resultFocusedByOtherMap } -func queryRecommedDataSet(start_unix int64, end_unix int64) map[int64]int { +func queryRecommedDataSet(start_unix int64, end_unix int64) (map[int64]int, map[int64]int) { sess := x.NewSession() defer sess.Close() - userIdDdatasetMap := make(map[int64]int) - cond := " created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix) + " and recommend=true" + userIdRecommentDatasetMap := make(map[int64]int) + userIdCreateDatasetMap := make(map[int64]int) + cond := " created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix) count, err := sess.Where(cond).Count(new(Dataset)) if err != nil { log.Info("query recommend dataset error. 
return.") - return userIdDdatasetMap + return userIdRecommentDatasetMap, userIdCreateDatasetMap } var indexTotal int64 indexTotal = 0 @@ -1732,18 +1816,21 @@ func queryRecommedDataSet(start_unix int64, end_unix int64) map[int64]int { sess.Find(&datasetList) log.Info("query datasetList size=" + fmt.Sprint(len(datasetList))) for _, datasetRecord := range datasetList { - if _, ok := userIdDdatasetMap[datasetRecord.UserID]; !ok { - userIdDdatasetMap[datasetRecord.UserID] = 1 - } else { - userIdDdatasetMap[datasetRecord.UserID] += 1 + if datasetRecord.Recommend { + if _, ok := userIdRecommentDatasetMap[datasetRecord.UserID]; !ok { + userIdRecommentDatasetMap[datasetRecord.UserID] = 1 + } else { + userIdRecommentDatasetMap[datasetRecord.UserID] += 1 + } } + userIdCreateDatasetMap[datasetRecord.UserID] = getMapValue(datasetRecord.UserID, userIdCreateDatasetMap) + 1 } indexTotal += PAGE_SIZE if indexTotal >= count { break } } - return userIdDdatasetMap + return userIdRecommentDatasetMap, userIdCreateDatasetMap } func queryAllDataSet() (map[int64]int64, map[int64]int64) { @@ -1922,22 +2009,23 @@ func queryImageStars(start_unix int64, end_unix int64) (map[int64]int, map[int64 return imageCollect, imageCollected } -func queryDatasetSize(start_unix int64, end_unix int64) (map[int64]int, map[int64]int) { +func queryDatasetSize(start_unix int64, end_unix int64) (map[int64]int, map[int64]int, map[int64]int) { sess := x.NewSession() defer sess.Close() resultSizeMap := make(map[int64]int) resultNumMap := make(map[int64]int) + resultDownloadMap := make(map[int64]int) cond := " created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix) count, err := sess.Where(cond).Count(new(Attachment)) if err != nil { log.Info("query attachment error. return.") - return resultSizeMap, resultNumMap + return resultSizeMap, resultNumMap, resultDownloadMap } var indexTotal int64 indexTotal = 0 for { - sess.Select("id,uploader_id,size").Table("attachment").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) + sess.Select("id,uploader_id,size,download_count").Table("attachment").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) attachmentList := make([]*Attachment, 0) sess.Find(&attachmentList) @@ -1946,9 +2034,11 @@ func queryDatasetSize(start_unix int64, end_unix int64) (map[int64]int, map[int6 if _, ok := resultSizeMap[attachRecord.UploaderID]; !ok { resultSizeMap[attachRecord.UploaderID] = int(attachRecord.Size / (1024 * 1024)) //MB resultNumMap[attachRecord.UploaderID] = 1 + resultDownloadMap[attachRecord.UploaderID] = int(attachRecord.DownloadCount) } else { resultSizeMap[attachRecord.UploaderID] += int(attachRecord.Size / (1024 * 1024)) //MB resultNumMap[attachRecord.UploaderID] += 1 + resultDownloadMap[attachRecord.UploaderID] += int(attachRecord.DownloadCount) } } @@ -1958,32 +2048,50 @@ func queryDatasetSize(start_unix int64, end_unix int64) (map[int64]int, map[int6 } } - return resultSizeMap, resultNumMap + return resultSizeMap, resultNumMap, resultDownloadMap } -func queryUserCreateRepo(start_unix int64, end_unix int64) map[int64]int { +func queryUserCreateRepo(start_unix int64, end_unix int64) (map[int64]int, map[string]int, map[int64]string) { sess := x.NewSession() defer sess.Close() resultMap := make(map[int64]int) + detailInfoMap := make(map[string]int) + mostDownloadMap := make(map[int64]string) + cond := "is_fork=false and created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix) count, err := 
sess.Where(cond).Count(new(Repository)) if err != nil { log.Info("query Repository error. return.") - return resultMap + return resultMap, detailInfoMap, mostDownloadMap } var indexTotal int64 indexTotal = 0 for { - sess.Select("id,owner_id,name").Table("repository").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) + sess.Select("id,owner_id,name,is_private,clone_cnt").Table("repository").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) repoList := make([]*Repository, 0) sess.Find(&repoList) log.Info("query Repository size=" + fmt.Sprint(len(repoList))) for _, repoRecord := range repoList { - if _, ok := resultMap[repoRecord.OwnerID]; !ok { - resultMap[repoRecord.OwnerID] = 1 + resultMap[repoRecord.OwnerID] = getMapValue(repoRecord.OwnerID, resultMap) + 1 + + key := fmt.Sprint(repoRecord.OwnerID) + "_total" + detailInfoMap[key] = getMapKeyStringValue(key, detailInfoMap) + 1 + + if repoRecord.IsPrivate { + key := fmt.Sprint(repoRecord.OwnerID) + "_is_private" + detailInfoMap[key] = getMapKeyStringValue(key, detailInfoMap) + 1 } else { - resultMap[repoRecord.OwnerID] += 1 + key := fmt.Sprint(repoRecord.OwnerID) + "_is_public" + detailInfoMap[key] = getMapKeyStringValue(key, detailInfoMap) + 1 + } + key = fmt.Sprint(repoRecord.OwnerID) + "_total_download" + detailInfoMap[key] = getMapKeyStringValue(key, detailInfoMap) + int(repoRecord.CloneCnt) + + key = fmt.Sprint(repoRecord.OwnerID) + "_most_download" + if int(repoRecord.CloneCnt) > getMapKeyStringValue(key, detailInfoMap) { + detailInfoMap[key] = int(repoRecord.CloneCnt) + mostDownloadMap[repoRecord.OwnerID] = repoRecord.DisplayName() } } indexTotal += PAGE_SIZE @@ -1992,7 +2100,7 @@ func queryUserCreateRepo(start_unix int64, end_unix int64) map[int64]int { } } - return resultMap + return resultMap, detailInfoMap, mostDownloadMap } func queryUserRepoOpenIIndex(start_unix int64, end_unix int64) map[int64]float64 { @@ -2180,6 +2288,7 @@ func queryCloudBrainTask(start_unix int64, end_unix int64) (map[int64]int, map[s setMapKey("CloudBrainRunTime", cloudTaskRecord.UserID, int(cloudTaskRecord.Duration), resultItemMap) } if cloudTaskRecord.Type == 1 { //npu + setMapKey("CloudBrainTwo", cloudTaskRecord.UserID, 1, resultItemMap) if cloudTaskRecord.JobType == "TRAIN" { setMapKey("NpuTrainJob", cloudTaskRecord.UserID, 1, resultItemMap) } else if cloudTaskRecord.JobType == "INFERENCE" { @@ -2187,14 +2296,32 @@ func queryCloudBrainTask(start_unix int64, end_unix int64) (map[int64]int, map[s } else { setMapKey("NpuDebugJob", cloudTaskRecord.UserID, 1, resultItemMap) } - } else { //type=0 gpu + } else if cloudTaskRecord.Type == 0 { //type=0 gpu + setMapKey("CloudBrainOne", cloudTaskRecord.UserID, 1, resultItemMap) if cloudTaskRecord.JobType == "TRAIN" { setMapKey("GpuTrainJob", cloudTaskRecord.UserID, 1, resultItemMap) + } else if cloudTaskRecord.JobType == "INFERENCE" { + setMapKey("GpuInferenceJob", cloudTaskRecord.UserID, 1, resultItemMap) } else if cloudTaskRecord.JobType == "BENCHMARK" { setMapKey("GpuBenchMarkJob", cloudTaskRecord.UserID, 1, resultItemMap) } else { setMapKey("GpuDebugJob", cloudTaskRecord.UserID, 1, resultItemMap) } + } else if cloudTaskRecord.Type == 2 { + setMapKey("C2Net", cloudTaskRecord.UserID, 1, resultItemMap) + if cloudTaskRecord.ComputeResource == NPUResource { + if cloudTaskRecord.JobType == "TRAIN" { + setMapKey("NpuTrainJob", cloudTaskRecord.UserID, 1, resultItemMap) + } else { + setMapKey("NpuDebugJob", cloudTaskRecord.UserID, 1, resultItemMap) + } + } else if 
cloudTaskRecord.ComputeResource == GPUResource {
+				if cloudTaskRecord.JobType == "TRAIN" {
+					setMapKey("GpuTrainJob", cloudTaskRecord.UserID, 1, resultItemMap)
+				} else {
+					setMapKey("GpuDebugJob", cloudTaskRecord.UserID, 1, resultItemMap)
+				}
+			}
 		}
 	}
 	indexTotal += PAGE_SIZE
@@ -2274,3 +2401,26 @@ func subMonth(t1, t2 time.Time) (month int) {
 	}
 	return month
 }
+
+func GetContentFromPromote(url string) (string, error) {
+	defer func() {
+		if err := recover(); err != nil {
+			log.Error("GetContentFromPromote panic: %v", err)
+		}
+	}()
+	resp, err := http.Get(url)
+	if err != nil {
+		log.Info("get content from promote failed, error=" + err.Error())
+		return "", err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != 200 {
+		log.Info("get content from promote failed, status=" + resp.Status)
+		return "", fmt.Errorf("get %s failed, status: %s", url, resp.Status)
+	}
+	bytes, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		log.Info("read promote content failed, error=" + err.Error())
+		return "", err
+	}
+	return string(bytes), nil
+}
diff --git a/models/user_business_struct.go b/models/user_business_struct.go
index fe98be760..9dcc12342 100644
--- a/models/user_business_struct.go
+++ b/models/user_business_struct.go
@@ -2,6 +2,27 @@ package models
 
 import "code.gitea.io/gitea/modules/timeutil"
 
+type UserSummaryCurrentYear struct {
+	ID    int64  `xorm:"pk"`
+	Email string `xorm:"NOT NULL"`
+	//user
+	Name  string `xorm:"NOT NULL"`
+	Phone string `xorm:"NULL"`
+	//user
+	RegistDate timeutil.TimeStamp `xorm:"NOT NULL"`
+
+	DateCount      int    `xorm:"NOT NULL DEFAULT 0"`
+	MostActiveDay  string `xorm:" NULL "`        //08.05
+	RepoInfo       string `xorm:"varchar(1000)"` //created XX repos (XX public, XX private); downloaded XXX times in total, with the 《XXXXXXX》 repo earning the most downloads at XXX
+	DataSetInfo    string `xorm:"varchar(500)"`  //created XX datasets and uploaded XX dataset files; downloaded XX times and favorited XX times in total
+	CodeInfo       string `xorm:"varchar(500)"`  //commit count, total committed lines of code, time of the latest commit
+	CloudBrainInfo string `xorm:"varchar(1000)"` //created XX cloudbrain tasks (XX debug, XX train, XX inference); ran XXXX card-hours in total, saving xxxxx yuan
+	//of that free compute, XX% came from Pengcheng Cloudbrain 1, XX% from Pengcheng Cloudbrain 2, and XX% from the intelligent computing network (C2Net)
+	PlayARoll string `xorm:"varchar(500)"` //took part in the "我为开源打榜狂" bonus campaign XX times, made the leaderboard XX times, and earned XXX yuan of community incentives in total
+
+	Label string `xorm:"varchar(500)"`
+}
+
 type UserBusinessAnalysisCurrentYear struct {
 	ID        int64 `xorm:"pk"`
 	CountDate int64 `xorm:"pk"`
@@ -505,3 +526,182 @@ type UserMetrics struct {
 	ActivityUserJson     string `xorm:"text NULL"`          //list of activated users
 	CurrentDayRegistUser int    `xorm:"NOT NULL DEFAULT 0"` //users registered that day
 }
+
+type UserBusinessAnalysisAll struct {
+	ID int64 `xorm:"pk"`
+
+	CountDate int64 `xorm:"pk"`
+
+	//action :ActionMergePullRequest // 11
+	CodeMergeCount int `xorm:"NOT NULL DEFAULT 0"`
+
+	//action :ActionCommitRepo // 5
+	CommitCount int `xorm:"NOT NULL DEFAULT 0"`
+
+	//action :ActionCreateIssue // 10
+	IssueCount int `xorm:"NOT NULL DEFAULT 0"`
+
+	//comment table current date
+	CommentCount int `xorm:"NOT NULL DEFAULT 0"`
+
+	//watch table current date
+	FocusRepoCount int `xorm:"NOT NULL DEFAULT 0"`
+
+	//star table current date
+	StarRepoCount int `xorm:"NOT NULL DEFAULT 0"`
+
+	//follow table
+	WatchedCount int `xorm:"NOT NULL DEFAULT 0"`
+
+	// user table
+	GiteaAgeMonth int `xorm:"NOT NULL DEFAULT 0"`
+
+	//
+	CommitCodeSize int `xorm:"NOT NULL DEFAULT 0"`
+
+	//attachment table
+	CommitDatasetSize int `xorm:"NOT NULL DEFAULT 0"`
+
+	//0
+	CommitModelCount int `xorm:"NOT NULL DEFAULT 0"`
+
+	//issue, issueassignees
+	SolveIssueCount int `xorm:"NOT NULL DEFAULT 0"`
+
+	//baike
+	EncyclopediasCount int `xorm:"NOT NULL DEFAULT 0"`
+
+	//user
+	RegistDate timeutil.TimeStamp `xorm:"NOT NULL"`
+
+	//repo
+	CreateRepoCount int `xorm:"NOT NULL DEFAULT 0"`
+
+	//login count, from elk
+	LoginCount int `xorm:"NOT NULL DEFAULT 0"`
+
+	//openi
index + OpenIIndex float64 `xorm:"NOT NULL DEFAULT 0"` + + //user + Email string `xorm:"NOT NULL"` + + //user + Name string `xorm:"NOT NULL"` + + DataDate string `xorm:"NULL"` + + //cloudbraintask + CloudBrainTaskNum int `xorm:"NOT NULL DEFAULT 0"` + GpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + NpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + GpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuInferenceJob int `xorm:"NOT NULL DEFAULT 0"` + GpuBenchMarkJob int `xorm:"NOT NULL DEFAULT 0"` + CloudBrainRunTime int `xorm:"NOT NULL DEFAULT 0"` + CommitDatasetNum int `xorm:"NOT NULL DEFAULT 0"` + UserIndex float64 `xorm:"NOT NULL DEFAULT 0"` + UserIndexPrimitive float64 `xorm:"NOT NULL DEFAULT 0"` + + UserLocation string `xorm:"NULL"` + + FocusOtherUser int `xorm:"NOT NULL DEFAULT 0"` + CollectDataset int `xorm:"NOT NULL DEFAULT 0"` + CollectedDataset int `xorm:"NOT NULL DEFAULT 0"` + RecommendDataset int `xorm:"NOT NULL DEFAULT 0"` + CollectImage int `xorm:"NOT NULL DEFAULT 0"` + CollectedImage int `xorm:"NOT NULL DEFAULT 0"` + RecommendImage int `xorm:"NOT NULL DEFAULT 0"` + + Phone string `xorm:"NULL"` + InvitationUserNum int `xorm:"NOT NULL DEFAULT 0"` +} + +type UserBusinessAnalysis struct { + ID int64 `xorm:"pk"` + DataDate string `xorm:"pk"` + CountDate int64 `xorm:"NULL"` + + //action :ActionMergePullRequest // 11 + CodeMergeCount int `xorm:"NOT NULL DEFAULT 0"` + + //action :ActionCommitRepo // 5 + CommitCount int `xorm:"NOT NULL DEFAULT 0"` + + //action :ActionCreateIssue // 6 + IssueCount int `xorm:"NOT NULL DEFAULT 0"` + + //comment table current date + CommentCount int `xorm:"NOT NULL DEFAULT 0"` + + //watch table current date + FocusRepoCount int `xorm:"NOT NULL DEFAULT 0"` + + //star table current date + StarRepoCount int `xorm:"NOT NULL DEFAULT 0"` + + //follow table + WatchedCount int `xorm:"NOT NULL DEFAULT 0"` + + // user table + GiteaAgeMonth int `xorm:"NOT NULL DEFAULT 0"` + + // + CommitCodeSize int `xorm:"NOT NULL DEFAULT 0"` + + //attachement table + CommitDatasetSize int `xorm:"NOT NULL DEFAULT 0"` + + //0 + CommitModelCount int `xorm:"NOT NULL DEFAULT 0"` + + //issue, issueassignees + SolveIssueCount int `xorm:"NOT NULL DEFAULT 0"` + + //baike + EncyclopediasCount int `xorm:"NOT NULL DEFAULT 0"` + + //user + RegistDate timeutil.TimeStamp `xorm:"NOT NULL"` + + //repo + CreateRepoCount int `xorm:"NOT NULL DEFAULT 0"` + + //login count, from elk + LoginCount int `xorm:"NOT NULL DEFAULT 0"` + + //openi index + OpenIIndex float64 `xorm:"NOT NULL DEFAULT 0"` + + //user + Email string `xorm:"NOT NULL"` + + //user + Name string `xorm:"NOT NULL"` + + CloudBrainTaskNum int `xorm:"NOT NULL DEFAULT 0"` + GpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + NpuDebugJob int `xorm:"NOT NULL DEFAULT 0"` + GpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuTrainJob int `xorm:"NOT NULL DEFAULT 0"` + NpuInferenceJob int `xorm:"NOT NULL DEFAULT 0"` + GpuBenchMarkJob int `xorm:"NOT NULL DEFAULT 0"` + CloudBrainRunTime int `xorm:"NOT NULL DEFAULT 0"` + CommitDatasetNum int `xorm:"NOT NULL DEFAULT 0"` + UserIndex float64 `xorm:"NOT NULL DEFAULT 0"` + UserIndexPrimitive float64 `xorm:"NOT NULL DEFAULT 0"` + + UserLocation string `xorm:"NULL"` + + FocusOtherUser int `xorm:"NOT NULL DEFAULT 0"` + CollectDataset int `xorm:"NOT NULL DEFAULT 0"` + CollectedDataset int `xorm:"NOT NULL DEFAULT 0"` + RecommendDataset int `xorm:"NOT NULL DEFAULT 0"` + CollectImage int `xorm:"NOT NULL DEFAULT 0"` + CollectedImage int `xorm:"NOT NULL DEFAULT 0"` + RecommendImage int 
`xorm:"NOT NULL DEFAULT 0"` + + Phone string `xorm:"NULL"` + InvitationUserNum int `xorm:"NOT NULL DEFAULT 0"` +} diff --git a/modules/grampus/resty.go b/modules/grampus/resty.go index f36721c85..a9e1aed5c 100755 --- a/modules/grampus/resty.go +++ b/modules/grampus/resty.go @@ -245,6 +245,32 @@ func GetTrainJobLog(jobID string) (string, error) { return logContent, nil } +func GetGrampusMetrics(jobID string) (models.GetTrainJobMetricStatisticResult, error) { + checkSetting() + client := getRestyClient() + var result models.GetTrainJobMetricStatisticResult + res, err := client.R(). + SetAuthToken(TOKEN). + Get(HOST + urlTrainJob + "/" + jobID + "/task/0/replica/0/metrics") + + if err != nil { + return result, fmt.Errorf("resty GetTrainJobLog: %v", err) + } + if err = json.Unmarshal([]byte(res.String()), &result); err != nil { + log.Error("GetGrampusMetrics json.Unmarshal failed(%s): %v", res.String(), err.Error()) + return result, fmt.Errorf("json.Unmarshal failed(%s): %v", res.String(), err.Error()) + } + if res.StatusCode() != http.StatusOK { + log.Error("Call GrampusMetrics failed(%d):%s(%s)", res.StatusCode(), result.ErrorCode, result.ErrorMsg) + return result, fmt.Errorf("Call GrampusMetrics failed(%d):%d(%s)", res.StatusCode(), result.ErrorCode, result.ErrorMsg) + } + if !result.IsSuccess { + log.Error("GetGrampusMetrics(%s) failed", jobID) + return result, fmt.Errorf("GetGrampusMetrics failed:%s", result.ErrorMsg) + } + return result, nil +} + func StopJob(jobID string) (*models.GrampusStopJobResponse, error) { checkSetting() client := getRestyClient() diff --git a/modules/setting/setting.go b/modules/setting/setting.go index bbc3dc167..e237d5a8a 100755 --- a/modules/setting/setting.go +++ b/modules/setting/setting.go @@ -1454,7 +1454,7 @@ func NewContext() { MaxDuration = sec.Key("MAX_DURATION").MustInt64(14400) TrainGpuTypes = sec.Key("TRAIN_GPU_TYPES").MustString("") TrainResourceSpecs = sec.Key("TRAIN_RESOURCE_SPECS").MustString("") - MaxModelSize = sec.Key("MAX_MODEL_SIZE").MustFloat64(500) + MaxModelSize = sec.Key("MAX_MODEL_SIZE").MustFloat64(200) InferenceGpuTypes = sec.Key("INFERENCE_GPU_TYPES").MustString("") InferenceResourceSpecs = sec.Key("INFERENCE_RESOURCE_SPECS").MustString("") SpecialPools = sec.Key("SPECIAL_POOL").MustString("") diff --git a/modules/storage/minio.go b/modules/storage/minio.go index 47f70e12d..a1a6e131a 100755 --- a/modules/storage/minio.go +++ b/modules/storage/minio.go @@ -144,8 +144,8 @@ func (m *MinioStorage) HasObject(path string) (bool, error) { // Indicate to our routine to exit cleanly upon return. 
defer close(doneCh) - - objectCh := m.client.ListObjects(m.bucket, m.buildMinioPath(path), false, doneCh) + //objectCh := m.client.ListObjects(m.bucket, m.buildMinioPath(path), false, doneCh) + objectCh := m.client.ListObjects(m.bucket, path, false, doneCh) for object := range objectCh { if object.Err != nil { return hasObject, object.Err diff --git a/modules/storage/minio_ext.go b/modules/storage/minio_ext.go index 4b738c068..d4a8abba5 100755 --- a/modules/storage/minio_ext.go +++ b/modules/storage/minio_ext.go @@ -3,7 +3,6 @@ package storage import ( "encoding/xml" "errors" - "path" "sort" "strconv" "strings" @@ -101,7 +100,7 @@ func getClients() (*minio_ext.Client, *miniov6.Core, error) { return client, core, nil } -func GenMultiPartSignedUrl(uuid string, uploadId string, partNumber int, partSize int64) (string, error) { +func GenMultiPartSignedUrl(objectName string, uploadId string, partNumber int, partSize int64) (string, error) { minioClient, _, err := getClients() if err != nil { log.Error("getClients failed:", err.Error()) @@ -110,7 +109,7 @@ func GenMultiPartSignedUrl(uuid string, uploadId string, partNumber int, partSiz minio := setting.Attachment.Minio bucketName := minio.Bucket - objectName := strings.TrimPrefix(path.Join(minio.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/") + //objectName := strings.TrimPrefix(path.Join(minio.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/") return minioClient.GenUploadPartSignedUrl(uploadId, bucketName, objectName, partNumber, partSize, PresignedUploadPartUrlExpireTime, setting.Attachment.Minio.Location) } @@ -268,6 +267,23 @@ func MinioCopyFiles(bucketName string, srcPath string, destPath string, Files [] return fileTotalSize, nil } +func MinioCopyAFile(srcBucketName, srcObjectName, destBucketName, destObjectName string) (int64, error) { + _, core, err := getClients() + var fileTotalSize int64 + fileTotalSize = 0 + if err != nil { + log.Error("getClients failed:", err.Error()) + return fileTotalSize, err + } + meta, err := core.StatObject(srcBucketName, srcObjectName, miniov6.StatObjectOptions{}) + if err != nil { + log.Info("Get file error:" + err.Error()) + } + core.CopyObject(srcBucketName, srcObjectName, destBucketName, destObjectName, meta.UserMetadata) + fileTotalSize = meta.Size + return fileTotalSize, nil +} + func MinioPathCopy(bucketName string, srcPath string, destPath string) (int64, error) { _, core, err := getClients() var fileTotalSize int64 @@ -301,7 +317,7 @@ func MinioPathCopy(bucketName string, srcPath string, destPath string) (int64, e return fileTotalSize, nil } -func NewMultiPartUpload(uuid string) (string, error) { +func NewMultiPartUpload(objectName string) (string, error) { _, core, err := getClients() if err != nil { log.Error("getClients failed:", err.Error()) @@ -310,12 +326,12 @@ func NewMultiPartUpload(uuid string) (string, error) { minio := setting.Attachment.Minio bucketName := minio.Bucket - objectName := strings.TrimPrefix(path.Join(minio.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/") + //objectName := strings.TrimPrefix(path.Join(minio.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/") return core.NewMultipartUpload(bucketName, objectName, miniov6.PutObjectOptions{}) } -func CompleteMultiPartUpload(uuid string, uploadID string, totalChunks int) (string, error) { +func CompleteMultiPartUpload(objectName string, uploadID string, totalChunks int) (string, error) { client, core, err := getClients() if err != nil { log.Error("getClients failed:", err.Error()) @@ -324,8 +340,8 @@ func 
CompleteMultiPartUpload(uuid string, uploadID string, totalChunks int) (str minio := setting.Attachment.Minio bucketName := minio.Bucket - objectName := strings.TrimPrefix(path.Join(minio.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/") - + //objectName := strings.TrimPrefix(path.Join(minio.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/") + log.Info("bucketName=" + bucketName + " objectName=" + objectName + " uploadID=" + uploadID) partInfos, err := client.ListObjectParts(bucketName, objectName, uploadID) if err != nil { log.Error("ListObjectParts failed:", err.Error()) @@ -351,7 +367,7 @@ func CompleteMultiPartUpload(uuid string, uploadID string, totalChunks int) (str return core.CompleteMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload.Parts) } -func GetPartInfos(uuid string, uploadID string) (string, error) { +func GetPartInfos(objectName string, uploadID string) (string, error) { minioClient, _, err := getClients() if err != nil { log.Error("getClients failed:", err.Error()) @@ -360,7 +376,7 @@ func GetPartInfos(uuid string, uploadID string) (string, error) { minio := setting.Attachment.Minio bucketName := minio.Bucket - objectName := strings.TrimPrefix(path.Join(minio.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/") + //objectName := strings.TrimPrefix(path.Join(minio.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/") partInfos, err := minioClient.ListObjectParts(bucketName, objectName, uploadID) if err != nil { diff --git a/modules/storage/obs.go b/modules/storage/obs.go index 57ef63029..83b03ed44 100755 --- a/modules/storage/obs.go +++ b/modules/storage/obs.go @@ -90,17 +90,16 @@ func listAllParts(uuid, uploadID, key string) (output *obs.ListPartsOutput, err } else { continue } - - break } return output, nil } -func GetObsPartInfos(uuid, uploadID, fileName string) (string, error) { - key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/") +func GetObsPartInfos(objectName, uploadID string) (string, error) { + key := objectName + //strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/") - allParts, err := listAllParts(uuid, uploadID, key) + allParts, err := listAllParts(objectName, uploadID, key) if err != nil { log.Error("listAllParts failed: %v", err) return "", err @@ -114,10 +113,11 @@ func GetObsPartInfos(uuid, uploadID, fileName string) (string, error) { return chunks, nil } -func NewObsMultiPartUpload(uuid, fileName string) (string, error) { +func NewObsMultiPartUpload(objectName string) (string, error) { input := &obs.InitiateMultipartUploadInput{} input.Bucket = setting.Bucket - input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/") + input.Key = objectName + //strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/") output, err := ObsCli.InitiateMultipartUpload(input) if err != nil { @@ -128,13 +128,14 @@ func NewObsMultiPartUpload(uuid, fileName string) (string, error) { return output.UploadId, nil } -func CompleteObsMultiPartUpload(uuid, uploadID, fileName string, totalChunks int) error { +func CompleteObsMultiPartUpload(objectName, uploadID string, totalChunks int) error { input := &obs.CompleteMultipartUploadInput{} input.Bucket = setting.Bucket - input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/") + //input.Key = strings.TrimPrefix(path.Join(setting.BasePath, 
path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/") + input.Key = objectName input.UploadId = uploadID - allParts, err := listAllParts(uuid, uploadID, input.Key) + allParts, err := listAllParts(objectName, uploadID, input.Key) if err != nil { log.Error("listAllParts failed: %v", err) return err @@ -153,15 +154,16 @@ func CompleteObsMultiPartUpload(uuid, uploadID, fileName string, totalChunks int return err } - log.Info("uuid:%s, RequestId:%s", uuid, output.RequestId) + log.Info("uuid:%s, RequestId:%s", objectName, output.RequestId) return nil } -func ObsMultiPartUpload(uuid string, uploadId string, partNumber int, fileName string, putBody io.ReadCloser) error { +func ObsMultiPartUpload(objectName string, uploadId string, partNumber int, fileName string, putBody io.ReadCloser) error { input := &obs.UploadPartInput{} input.Bucket = setting.Bucket - input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/") + input.Key = objectName + //strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/") input.UploadId = uploadId input.PartNumber = partNumber input.Body = putBody @@ -241,11 +243,6 @@ func ObsDownloadAFile(bucket string, key string) (io.ReadCloser, error) { } } -func ObsDownload(uuid string, fileName string) (io.ReadCloser, error) { - - return ObsDownloadAFile(setting.Bucket, strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/")) -} - func ObsModelDownload(JobName string, fileName string) (io.ReadCloser, error) { input := &obs.GetObjectInput{} input.Bucket = setting.Bucket @@ -297,7 +294,7 @@ func ObsCopyManyFile(srcBucket string, srcPath string, destBucket string, destPa log.Info("Get File error, error=" + err.Error()) continue } - obsCopyFile(srcBucket, srcKey, destBucket, destKey) + ObsCopyFile(srcBucket, srcKey, destBucket, destKey) fileTotalSize += out.ContentLength } @@ -321,7 +318,7 @@ func ObsCopyAllFile(srcBucket string, srcPath string, destBucket string, destPat index++ for _, val := range output.Contents { destKey := destPath + val.Key[length:] - obsCopyFile(srcBucket, val.Key, destBucket, destKey) + ObsCopyFile(srcBucket, val.Key, destBucket, destKey) fileTotalSize += val.Size } if output.IsTruncated { @@ -340,7 +337,7 @@ func ObsCopyAllFile(srcBucket string, srcPath string, destBucket string, destPat return fileTotalSize, nil } -func obsCopyFile(srcBucket string, srcKeyName string, destBucket string, destKeyName string) error { +func ObsCopyFile(srcBucket string, srcKeyName string, destBucket string, destKeyName string) error { input := &obs.CopyObjectInput{} input.Bucket = destBucket input.Key = destKeyName @@ -529,11 +526,12 @@ func GetObsListObject(jobName, outPutPath, parentDir, versionName string) ([]Fil } } -func ObsGenMultiPartSignedUrl(uuid string, uploadId string, partNumber int, fileName string) (string, error) { +func ObsGenMultiPartSignedUrl(objectName string, uploadId string, partNumber int) (string, error) { input := &obs.CreateSignedUrlInput{} input.Bucket = setting.Bucket - input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/") + input.Key = objectName + //strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/") input.Expires = 60 * 60 input.Method = obs.HttpMethodPut @@ -581,10 +579,11 @@ func GetObsCreateSignedUrl(jobName, parentDir, fileName string) (string, error) return 
GetObsCreateSignedUrlByBucketAndKey(setting.Bucket, strings.TrimPrefix(path.Join(setting.TrainJobModelPath, jobName, setting.OutPutPath, parentDir, fileName), "/"))
 }
 
-func ObsGetPreSignedUrl(uuid, fileName string) (string, error) {
+func ObsGetPreSignedUrl(objectName, fileName string) (string, error) {
 	input := &obs.CreateSignedUrlInput{}
 	input.Method = obs.HttpMethodGet
-	input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/")
+	input.Key = objectName
+	//strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/")
 
 	input.Bucket = setting.Bucket
 	input.Expires = 60 * 60
diff --git a/options/locale/locale_en-US.ini b/options/locale/locale_en-US.ini
index e8e8722e2..9a16ae0ff 100755
--- a/options/locale/locale_en-US.ini
+++ b/options/locale/locale_en-US.ini
@@ -1268,12 +1268,14 @@ model.manage.model_accuracy = Model Accuracy
 model.convert=Model Transformation
 model.list=Model List
 model.manage.create_new_convert_task=Create Model Transformation Task
-
+model.manage.import_local_model=Import Local Model
+model.manage.import_online_model=Import Online Model
 model.manage.notcreatemodel=No model has been created
 model.manage.init1=Code version: You have not initialized the code repository, please
 model.manage.init2=initialized first ;
 model.manage.createtrainjob_tip=Training task: you haven't created a training task, please create it first
-model.manage.createtrainjob=Training task.
+model.manage.createmodel_tip=You can import a local model or an online model. To import an online model, you should first
+model.manage.createtrainjob=create a training task.
 model.manage.delete=Delete Model
 model.manage.delete_confirm=Are you sure to delete this model? Once this model is deleted, it cannot be restored.
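+; Note: model.manage.createmodel_tip is rendered immediately before the
+; model.manage.createtrainjob link text (see templates/repo/modelmanage/index.tmpl),
+; so the two strings must read as one sentence.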
model.manage.select.trainjob=Select train task diff --git a/options/locale/locale_zh-CN.ini b/options/locale/locale_zh-CN.ini index d13c99443..ce179949a 100755 --- a/options/locale/locale_zh-CN.ini +++ b/options/locale/locale_zh-CN.ini @@ -1283,12 +1283,14 @@ model.manage.model_accuracy = 模型精度 model.convert=模型转换任务 model.list=模型列表 model.manage.create_new_convert_task=创建模型转换任务 - +model.manage.import_local_model=导入本地模型 +model.manage.import_online_model=导入线上模型 model.manage.notcreatemodel=未创建过模型 model.manage.init1=代码版本:您还没有初始化代码仓库,请先 model.manage.init2=创建代码版本; model.manage.createtrainjob_tip=训练任务:您还没创建过训练任务,请先创建 -model.manage.createtrainjob=训练任务。 +model.manage.createmodel_tip=您可以导入本地模型或者导入线上模型。导入线上模型需先 +model.manage.createtrainjob=创建训练任务。 model.manage.delete=删除模型 model.manage.delete_confirm=你确认删除该模型么?此模型一旦删除不可恢复。 model.manage.select.trainjob=选择训练任务 diff --git a/routers/admin/resources.go b/routers/admin/resources.go index 026c37e52..feea7b69b 100644 --- a/routers/admin/resources.go +++ b/routers/admin/resources.go @@ -127,6 +127,7 @@ func GetResourceSpecificationList(ctx *context.Context) { Status: status, Cluster: cluster, AvailableCode: available, + OrderBy: models.SearchSpecOrderById, }) if err != nil { log.Error("GetResourceSpecificationList error.%v", err) diff --git a/routers/api/v1/api.go b/routers/api/v1/api.go index 455057aeb..2afbb9b7d 100755 --- a/routers/api/v1/api.go +++ b/routers/api/v1/api.go @@ -1012,7 +1012,9 @@ func RegisterRoutes(m *macaron.Macaron) { m.Get("/query_modelfile_for_predict", repo.QueryModelFileForPredict) m.Get("/query_train_model", repo.QueryTrainModelList) m.Post("/create_model_convert", repo.CreateModelConvert) - m.Get("/show_model_convert_page") + m.Get("/show_model_convert_page", repo.ShowModelConvertPage) + m.Get("/query_model_convert_byId", repo.QueryModelConvertById) + m.Get("/:id", repo.GetCloudbrainModelConvertTask) m.Get("/:id/log", repo.CloudbrainForModelConvertGetLog) m.Get("/:id/modelartlog", repo.TrainJobForModelConvertGetLog) @@ -1049,6 +1051,7 @@ func RegisterRoutes(m *macaron.Macaron) { m.Get("", repo.GetModelArtsTrainJobVersion) m.Post("/stop_version", cloudbrain.AdminOrOwnerOrJobCreaterRightForTrain, repo_ext.GrampusStopJob) m.Get("/log", repo_ext.GrampusGetLog) + m.Get("/metrics", repo_ext.GrampusMetrics) m.Get("/download_log", cloudbrain.AdminOrJobCreaterRightForTrain, repo_ext.GrampusDownloadLog) }) }) diff --git a/routers/api/v1/repo/cloudbrain.go b/routers/api/v1/repo/cloudbrain.go index 2e25fdefe..7022dc011 100755 --- a/routers/api/v1/repo/cloudbrain.go +++ b/routers/api/v1/repo/cloudbrain.go @@ -634,7 +634,7 @@ func CloudbrainGetLog(ctx *context.APIContext) { endLine += 1 } } - + result = getLogFromModelDir(job.JobName, startLine, endLine, resultPath) if result == nil { log.Error("GetJobLog failed: %v", err, ctx.Data["MsgID"]) @@ -649,14 +649,20 @@ func CloudbrainGetLog(ctx *context.APIContext) { if ctx.Data["existStr"] != nil && result["Lines"].(int) < 50 { content = content + ctx.Data["existStr"].(string) } + logFileName := result["FileName"] + + //Logs can only be downloaded if the file exists + //and the current user is an administrator or the creator of the task + canLogDownload := logFileName != nil && logFileName != "" && job.IsUserHasRight(ctx.User) + re := map[string]interface{}{ "JobID": ID, - "LogFileName": result["FileName"], + "LogFileName": logFileName, "StartLine": result["StartLine"], "EndLine": result["EndLine"], "Content": content, "Lines": result["Lines"], - "CanLogDownload": result["FileName"] != "", + 
"CanLogDownload": canLogDownload, "StartTime": job.StartTime, } //result := CloudbrainGetLogByJobId(job.JobID, job.JobName) diff --git a/routers/api/v1/repo/modelarts.go b/routers/api/v1/repo/modelarts.go index d937892b6..e0db9eda3 100755 --- a/routers/api/v1/repo/modelarts.go +++ b/routers/api/v1/repo/modelarts.go @@ -280,15 +280,6 @@ func TrainJobGetLog(ctx *context.APIContext) { return } - prefix := strings.TrimPrefix(path.Join(setting.TrainJobModelPath, task.JobName, modelarts.LogPath, versionName), "/") + "/job" - _, err = storage.GetObsLogFileName(prefix) - var canLogDownload bool - if err != nil { - canLogDownload = false - } else { - canLogDownload = true - } - ctx.Data["log_file_name"] = resultLogFile.LogFileList[0] ctx.JSON(http.StatusOK, map[string]interface{}{ @@ -298,11 +289,23 @@ func TrainJobGetLog(ctx *context.APIContext) { "EndLine": result.EndLine, "Content": result.Content, "Lines": result.Lines, - "CanLogDownload": canLogDownload, + "CanLogDownload": canLogDownload(ctx.User, task), "StartTime": task.StartTime, }) } +func canLogDownload(user *models.User, task *models.Cloudbrain) bool { + if task == nil || !task.IsUserHasRight(user) { + return false + } + prefix := strings.TrimPrefix(path.Join(setting.TrainJobModelPath, task.JobName, modelarts.LogPath, task.VersionName), "/") + "/job" + _, err := storage.GetObsLogFileName(prefix) + if err != nil { + return false + } + return true +} + func trainJobGetLogContent(jobID string, versionID int64, baseLine string, order string, lines int) (*models.GetTrainJobLogFileNamesResult, *models.GetTrainJobLogResult, error) { resultLogFile, err := modelarts.GetTrainJobLogFileNames(jobID, strconv.FormatInt(versionID, 10)) diff --git a/routers/api/v1/repo/modelmanage.go b/routers/api/v1/repo/modelmanage.go index 2c1fd9f01..15260790d 100644 --- a/routers/api/v1/repo/modelmanage.go +++ b/routers/api/v1/repo/modelmanage.go @@ -104,3 +104,12 @@ func ShowModelConvertPage(ctx *context.APIContext) { } } + +func QueryModelConvertById(ctx *context.APIContext) { + modelResult, err := routerRepo.GetModelConvertById(ctx.Context) + if err == nil { + ctx.JSON(http.StatusOK, modelResult) + } else { + ctx.JSON(http.StatusOK, nil) + } +} diff --git a/routers/repo/ai_model_convert.go b/routers/repo/ai_model_convert.go index 560ace8fd..962c76aae 100644 --- a/routers/repo/ai_model_convert.go +++ b/routers/repo/ai_model_convert.go @@ -150,6 +150,7 @@ func SaveModelConvert(ctx *context.Context) { go goCreateTask(modelConvert, ctx, task) ctx.JSON(200, map[string]string{ + "id": id, "code": "0", }) } @@ -726,6 +727,11 @@ func ShowModelConvertPageInfo(ctx *context.Context) { } } +func GetModelConvertById(ctx *context.Context) (*models.AiModelConvert, error) { + id := ctx.Query("id") + return models.QueryModelConvertById(id) +} + func GetModelConvertPageData(ctx *context.Context) ([]*models.AiModelConvert, int64, error) { page := ctx.QueryInt("page") if page <= 0 { diff --git a/routers/repo/ai_model_manage.go b/routers/repo/ai_model_manage.go index 1bef11703..7eedb9bc4 100644 --- a/routers/repo/ai_model_manage.go +++ b/routers/repo/ai_model_manage.go @@ -22,16 +22,24 @@ import ( ) const ( - Model_prefix = "aimodels/" - tplModelManageIndex = "repo/modelmanage/index" - tplModelManageDownload = "repo/modelmanage/download" - tplModelInfo = "repo/modelmanage/showinfo" - MODEL_LATEST = 1 - MODEL_NOT_LATEST = 0 - MODEL_MAX_SIZE = 1024 * 1024 * 1024 - STATUS_COPY_MODEL = 1 - STATUS_FINISHED = 0 - STATUS_ERROR = 2 + Attachment_model = "model" + Model_prefix = "aimodels/" + 
tplModelManageIndex = "repo/modelmanage/index" + tplModelManageDownload = "repo/modelmanage/download" + tplModelInfo = "repo/modelmanage/showinfo" + tplCreateLocalModelInfo = "repo/modelmanage/create_local_1" + tplCreateLocalForUploadModelInfo = "repo/modelmanage/create_local_2" + tplCreateOnlineModelInfo = "repo/modelmanage/create_online" + + MODEL_LATEST = 1 + MODEL_NOT_LATEST = 0 + MODEL_MAX_SIZE = 1024 * 1024 * 1024 + STATUS_COPY_MODEL = 1 + STATUS_FINISHED = 0 + STATUS_ERROR = 2 + + MODEL_LOCAL_TYPE = 1 + MODEL_ONLINE_TYPE = 0 ) func saveModelByParameters(jobId string, versionName string, name string, version string, label string, description string, engine int, ctx *context.Context) (string, error) { @@ -70,13 +78,12 @@ func saveModelByParameters(jobId string, versionName string, name string, versio cloudType = models.TypeCloudBrainTwo } else if aiTask.ComputeResource == models.GPUResource { cloudType = models.TypeCloudBrainOne - spec, err := resource.GetCloudbrainSpec(aiTask.ID) - if err == nil { - flaverName := "GPU: " + fmt.Sprint(spec.AccCardsNum) + "*" + spec.AccCardType + ",CPU: " + fmt.Sprint(spec.CpuCores) + "," + ctx.Tr("cloudbrain.memory") + ": " + fmt.Sprint(spec.MemGiB) + "GB," + ctx.Tr("cloudbrain.shared_memory") + ": " + fmt.Sprint(spec.ShareMemGiB) + "GB" - aiTask.FlavorName = flaverName - } } - + spec, err := resource.GetCloudbrainSpec(aiTask.ID) + if err == nil { + specJson, _ := json.Marshal(spec) + aiTask.FlavorName = string(specJson) + } accuracy := make(map[string]string) accuracy["F1"] = "" accuracy["Recall"] = "" @@ -189,6 +196,139 @@ func SaveNewNameModel(ctx *context.Context) { log.Info("save model end.") } +func SaveLocalModel(ctx *context.Context) { + if !ctx.Repo.CanWrite(models.UnitTypeModelManage) { + ctx.Error(403, ctx.Tr("repo.model_noright")) + return + } + re := map[string]string{ + "code": "-1", + } + log.Info("save SaveLocalModel start.") + uuid := uuid.NewV4() + id := uuid.String() + name := ctx.Query("name") + version := ctx.Query("version") + if version == "" { + version = "0.0.1" + } + label := ctx.Query("label") + description := ctx.Query("description") + engine := ctx.QueryInt("engine") + taskType := ctx.QueryInt("type") + modelActualPath := "" + if taskType == models.TypeCloudBrainOne { + destKeyNamePrefix := Model_prefix + models.AttachmentRelativePath(id) + "/" + modelActualPath = setting.Attachment.Minio.Bucket + "/" + destKeyNamePrefix + } else if taskType == models.TypeCloudBrainTwo { + destKeyNamePrefix := Model_prefix + models.AttachmentRelativePath(id) + "/" + modelActualPath = setting.Bucket + "/" + destKeyNamePrefix + } else { + re["msg"] = "type is error." 
+		ctx.JSON(200, re)
+		return
+	}
+	var lastNewModelId string
+	repoId := ctx.Repo.Repository.ID
+	aimodels := models.QueryModelByName(name, repoId)
+	if len(aimodels) > 0 {
+		for _, model := range aimodels {
+			if model.Version == version {
+				re["msg"] = ctx.Tr("repo.model.manage.create_error")
+				ctx.JSON(200, re)
+				return
+			}
+			if model.New == MODEL_LATEST {
+				lastNewModelId = model.ID
+			}
+		}
+	}
+	model := &models.AiModelManage{
+		ID:            id,
+		Version:       version,
+		ModelType:     MODEL_LOCAL_TYPE,
+		VersionCount:  len(aimodels) + 1,
+		Label:         label,
+		Name:          name,
+		Description:   description,
+		New:           MODEL_LATEST,
+		Type:          taskType,
+		Path:          modelActualPath,
+		Size:          0,
+		AttachmentId:  "",
+		RepoId:        repoId,
+		UserId:        ctx.User.ID,
+		Engine:        int64(engine),
+		TrainTaskInfo: "",
+		Accuracy:      "",
+		Status:        STATUS_FINISHED,
+	}
+
+	err := models.SaveModelToDb(model)
+	if err != nil {
+		re["msg"] = err.Error()
+		ctx.JSON(200, re)
+		return
+	}
+	if len(lastNewModelId) > 0 {
+		//update status and version count
+		models.ModifyModelNewProperty(lastNewModelId, MODEL_NOT_LATEST, 0)
+	}
+	var units []models.RepoUnit
+	var deleteUnitTypes []models.UnitType
+	units = append(units, models.RepoUnit{
+		RepoID: ctx.Repo.Repository.ID,
+		Type:   models.UnitTypeModelManage,
+		Config: &models.ModelManageConfig{
+			EnableModelManage: true,
+		},
+	})
+	deleteUnitTypes = append(deleteUnitTypes, models.UnitTypeModelManage)
+
+	models.UpdateRepositoryUnits(ctx.Repo.Repository, units, deleteUnitTypes)
+
+	log.Info("save model end.")
+	notification.NotifyOtherTask(ctx.User, ctx.Repo.Repository, id, name, models.ActionCreateNewModelTask)
+	re["code"] = "0"
+	re["id"] = id
+	ctx.JSON(200, re)
+}
+
+func getSize(files []storage.FileInfo) int64 {
+	var size int64
+	for _, file := range files {
+		size += file.Size
+	}
+	return size
+}
+
+func UpdateModelSize(modeluuid string) {
+	model, err := models.QueryModelById(modeluuid)
+	if err == nil {
+		if model.Type == models.TypeCloudBrainOne {
+			if strings.HasPrefix(model.Path, setting.Attachment.Minio.Bucket+"/"+Model_prefix) {
+				files, err := storage.GetAllObjectByBucketAndPrefixMinio(setting.Attachment.Minio.Bucket, model.Path[len(setting.Attachment.Minio.Bucket)+1:])
+				if err != nil {
+					log.Info("Failed to query model size from minio. id=" + modeluuid)
+					return
+				}
+				size := getSize(files)
+				models.ModifyModelSize(modeluuid, size)
+			}
+		} else if model.Type == models.TypeCloudBrainTwo {
+			if strings.HasPrefix(model.Path, setting.Bucket+"/"+Model_prefix) {
+				files, err := storage.GetAllObjectByBucketAndPrefix(setting.Bucket, model.Path[len(setting.Bucket)+1:])
+				if err != nil {
+					log.Info("Failed to query model size from obs. id=" + modeluuid)
+					return
+				}
+				size := getSize(files)
+				models.ModifyModelSize(modeluuid, size)
+			}
+		}
+	} else {
+		log.Info("not found model,uuid=" + modeluuid)
+	}
+}
+
 func SaveModel(ctx *context.Context) {
 	if !ctx.Repo.CanWrite(models.UnitTypeModelManage) {
 		ctx.Error(403, ctx.Tr("repo.model_noright"))
@@ -292,6 +432,60 @@ func downloadModelFromCloudBrainOne(modelUUID string, jobName string, parentDir
 		return "", 0, nil
 	}
 }
+func DeleteModelFile(ctx *context.Context) {
+	log.Info("delete model start.")
+	id := ctx.Query("id")
+	fileName := ctx.Query("fileName")
+	model, err := models.QueryModelById(id)
+	if err == nil {
+		if model.ModelType == MODEL_LOCAL_TYPE {
+			if model.Type == models.TypeCloudBrainOne {
+				bucketName := setting.Attachment.Minio.Bucket
+				objectName := model.Path[len(bucketName)+1:] + fileName
+				log.Info("delete bucket=" + bucketName + " path=" + objectName)
+				if strings.HasPrefix(model.Path, bucketName+"/"+Model_prefix) {
+					totalSize := storage.MinioGetFilesSize(bucketName, []string{objectName})
+					err := storage.Attachments.DeleteDir(objectName)
+					if err != nil {
+						log.Info("Failed to delete model. id=" + id)
+						re := map[string]string{
+							"code": "-1",
+						}
+						re["msg"] = err.Error()
+						ctx.JSON(200, re)
+						return
+					} else {
+						log.Info("delete minio file size is:" + fmt.Sprint(totalSize))
+						models.ModifyModelSize(id, model.Size-totalSize)
+					}
+				}
+			} else if model.Type == models.TypeCloudBrainTwo {
+				bucketName := setting.Bucket
+				objectName := model.Path[len(setting.Bucket)+1:] + fileName
+				log.Info("delete bucket=" + setting.Bucket + " path=" + objectName)
+				if strings.HasPrefix(model.Path, bucketName+"/"+Model_prefix) {
+					totalSize := storage.ObsGetFilesSize(bucketName, []string{objectName})
+					err := storage.ObsRemoveObject(bucketName, objectName)
+					if err != nil {
+						log.Info("Failed to delete model. id=" + id)
+						re := map[string]string{
+							"code": "-1",
+						}
+						re["msg"] = err.Error()
+						ctx.JSON(200, re)
+						return
+					} else {
+						log.Info("delete obs file size is:" + fmt.Sprint(totalSize))
+						models.ModifyModelSize(id, model.Size-totalSize)
+					}
+				}
+			}
+		}
+	}
+	ctx.JSON(200, map[string]string{
+		"code": "0",
+	})
+}
 
 func DeleteModel(ctx *context.Context) {
 	log.Info("delete model start.")
@@ -317,14 +511,28 @@ func deleteModelByID(ctx *context.Context, id string) error {
 		return errors.New(ctx.Tr("repo.model_noright"))
 	}
 	if err == nil {
-		log.Info("bucket=" + setting.Bucket + " path=" + model.Path)
-		if strings.HasPrefix(model.Path, setting.Bucket+"/"+Model_prefix) {
-			err := storage.ObsRemoveObject(setting.Bucket, model.Path[len(setting.Bucket)+1:])
-			if err != nil {
-				log.Info("Failed to delete model. id=" + id)
-				return err
+
+		if model.Type == models.TypeCloudBrainOne {
+			bucketName := setting.Attachment.Minio.Bucket
+			log.Info("bucket=" + bucketName + " path=" + model.Path)
+			if strings.HasPrefix(model.Path, bucketName+"/"+Model_prefix) {
+				err := storage.Attachments.DeleteDir(model.Path[len(bucketName)+1:])
+				if err != nil {
+					log.Info("Failed to delete model. id=" + id)
+					return err
+				}
+			}
+		} else if model.Type == models.TypeCloudBrainTwo {
+			log.Info("bucket=" + setting.Bucket + " path=" + model.Path)
+			if strings.HasPrefix(model.Path, setting.Bucket+"/"+Model_prefix) {
+				err := storage.ObsRemoveObject(setting.Bucket, model.Path[len(setting.Bucket)+1:])
+				if err != nil {
+					log.Info("Failed to delete model. 
id=" + id) + return err + } } } + err = models.DeleteModelById(id) if err == nil { //find a model to change new aimodels := models.QueryModelByName(model.Name, model.RepoId) @@ -884,29 +1092,58 @@ func ModifyModel(id string, description string) error { func ModifyModelInfo(ctx *context.Context) { log.Info("modify model start.") id := ctx.Query("id") - description := ctx.Query("description") - + re := map[string]string{ + "code": "-1", + } task, err := models.QueryModelById(id) if err != nil { + re["msg"] = err.Error() log.Error("no such model!", err.Error()) - ctx.ServerError("no such model:", err) + ctx.JSON(200, re) return } if !isOper(ctx, task.UserId) { - ctx.NotFound(ctx.Req.URL.RequestURI(), nil) - //ctx.ServerError("no right.", errors.New(ctx.Tr("repo.model_noright"))) + re["msg"] = "No right to operation." + ctx.JSON(200, re) return } + if task.ModelType == MODEL_LOCAL_TYPE { + name := ctx.Query("name") + label := ctx.Query("label") + description := ctx.Query("description") + engine := ctx.QueryInt("engine") + aimodels := models.QueryModelByName(name, task.RepoId) + if aimodels != nil && len(aimodels) > 0 { + if len(aimodels) == 1 { + if aimodels[0].ID != task.ID { + re["msg"] = ctx.Tr("repo.model.manage.create_error") + ctx.JSON(200, re) + return + } + } else { + re["msg"] = ctx.Tr("repo.model.manage.create_error") + ctx.JSON(200, re) + return + } + } + err = models.ModifyLocalModel(id, name, label, description, engine) - err = ModifyModel(id, description) + } else { + label := ctx.Query("label") + description := ctx.Query("description") + engine := task.Engine + name := task.Name + err = models.ModifyLocalModel(id, name, label, description, int(engine)) + } if err != nil { - log.Info("modify error," + err.Error()) - ctx.ServerError("error.", err) + re["msg"] = err.Error() + ctx.JSON(200, re) + return } else { - ctx.JSON(200, "success") + re["code"] = "0" + ctx.JSON(200, re) } - } func QueryModelListForPredict(ctx *context.Context) { @@ -1004,3 +1241,25 @@ func QueryOneLevelModelFile(ctx *context.Context) { ctx.JSON(http.StatusOK, fileinfos) } } + +func CreateLocalModel(ctx *context.Context) { + ctx.Data["isModelManage"] = true + ctx.Data["ModelManageAccess"] = ctx.Repo.CanWrite(models.UnitTypeModelManage) + + ctx.HTML(200, tplCreateLocalModelInfo) +} + +func CreateLocalModelForUpload(ctx *context.Context) { + ctx.Data["uuid"] = ctx.Query("uuid") + ctx.Data["isModelManage"] = true + ctx.Data["ModelManageAccess"] = ctx.Repo.CanWrite(models.UnitTypeModelManage) + ctx.Data["max_model_size"] = setting.MaxModelSize * MODEL_MAX_SIZE + ctx.HTML(200, tplCreateLocalForUploadModelInfo) +} + +func CreateOnlineModel(ctx *context.Context) { + ctx.Data["isModelManage"] = true + ctx.Data["ModelManageAccess"] = ctx.Repo.CanWrite(models.UnitTypeModelManage) + + ctx.HTML(200, tplCreateOnlineModelInfo) +} diff --git a/routers/repo/attachment.go b/routers/repo/attachment.go index dc2c417e4..240e78acc 100755 --- a/routers/repo/attachment.go +++ b/routers/repo/attachment.go @@ -11,6 +11,7 @@ import ( "fmt" "mime/multipart" "net/http" + "path" "strconv" "strings" @@ -311,7 +312,8 @@ func GetAttachment(ctx *context.Context) { url = setting.PROXYURL + "/obs_proxy_download?uuid=" + attach.UUID + "&file_name=" + attach.Name log.Info("return url=" + url) } else { - url, err = storage.ObsGetPreSignedUrl(attach.UUID, attach.Name) + objectName := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(attach.UUID[0:1], attach.UUID[1:2], attach.UUID, attach.Name)), "/") + url, err = 
storage.ObsGetPreSignedUrl(objectName, attach.Name) if err != nil { ctx.ServerError("ObsGetPreSignedUrl", err) return @@ -415,7 +417,7 @@ func AddAttachment(ctx *context.Context) { uuid := ctx.Query("uuid") has := false if typeCloudBrain == models.TypeCloudBrainOne { - has, err = storage.Attachments.HasObject(models.AttachmentRelativePath(uuid)) + has, err = storage.Attachments.HasObject(setting.Attachment.Minio.BasePath + models.AttachmentRelativePath(uuid)) if err != nil { ctx.ServerError("HasObject", err) return @@ -557,7 +559,7 @@ func GetSuccessChunks(ctx *context.Context) { isExist := false if typeCloudBrain == models.TypeCloudBrainOne { - isExist, err = storage.Attachments.HasObject(models.AttachmentRelativePath(fileChunk.UUID)) + isExist, err = storage.Attachments.HasObject(setting.Attachment.Minio.BasePath + models.AttachmentRelativePath(fileChunk.UUID)) if err != nil { ctx.ServerError("HasObject failed", err) return @@ -593,12 +595,12 @@ func GetSuccessChunks(ctx *context.Context) { } if typeCloudBrain == models.TypeCloudBrainOne { - chunks, err = storage.GetPartInfos(fileChunk.UUID, fileChunk.UploadID) + chunks, err = storage.GetPartInfos(strings.TrimPrefix(path.Join(setting.Attachment.Minio.BasePath, path.Join(fileChunk.UUID[0:1], fileChunk.UUID[1:2], fileChunk.UUID)), "/"), fileChunk.UploadID) if err != nil { log.Error("GetPartInfos failed:%v", err.Error()) } } else { - chunks, err = storage.GetObsPartInfos(fileChunk.UUID, fileChunk.UploadID, fileName) + chunks, err = storage.GetObsPartInfos(strings.TrimPrefix(path.Join(setting.BasePath, path.Join(fileChunk.UUID[0:1], fileChunk.UUID[1:2], fileChunk.UUID, fileName)), "/"), fileChunk.UploadID) if err != nil { log.Error("GetObsPartInfos failed:%v", err.Error()) } @@ -699,13 +701,13 @@ func NewMultipart(ctx *context.Context) { uuid := gouuid.NewV4().String() var uploadID string if typeCloudBrain == models.TypeCloudBrainOne { - uploadID, err = storage.NewMultiPartUpload(uuid) + uploadID, err = storage.NewMultiPartUpload(strings.TrimPrefix(path.Join(setting.Attachment.Minio.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/")) if err != nil { ctx.ServerError("NewMultipart", err) return } } else { - uploadID, err = storage.NewObsMultiPartUpload(uuid, fileName) + uploadID, err = storage.NewObsMultiPartUpload(strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/")) if err != nil { ctx.ServerError("NewObsMultiPartUpload", err) return @@ -749,8 +751,8 @@ func PutOBSProxyUpload(ctx *context.Context) { ctx.Error(500, fmt.Sprintf("FormFile: %v", RequestBody)) return } - - err := storage.ObsMultiPartUpload(uuid, uploadID, partNumber, fileName, RequestBody.ReadCloser()) + objectName := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/") + err := storage.ObsMultiPartUpload(objectName, uploadID, partNumber, fileName, RequestBody.ReadCloser()) if err != nil { log.Info("upload error.") } @@ -759,8 +761,8 @@ func PutOBSProxyUpload(ctx *context.Context) { func GetOBSProxyDownload(ctx *context.Context) { uuid := ctx.Query("uuid") fileName := ctx.Query("file_name") - - body, err := storage.ObsDownload(uuid, fileName) + objectName := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/") + body, err := storage.ObsDownloadAFile(setting.Bucket, objectName) if err != nil { log.Info("upload error.") } else { @@ -805,7 +807,7 @@ func GetMultipartUploadUrl(ctx *context.Context) { return } - url, err = 
storage.GenMultiPartSignedUrl(uuid, uploadID, partNumber, size) + url, err = storage.GenMultiPartSignedUrl(strings.TrimPrefix(path.Join(setting.Attachment.Minio.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/"), uploadID, partNumber, size) if err != nil { ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err)) return @@ -815,7 +817,7 @@ func GetMultipartUploadUrl(ctx *context.Context) { url = setting.PROXYURL + "/obs_proxy_multipart?uuid=" + uuid + "&uploadId=" + uploadID + "&partNumber=" + fmt.Sprint(partNumber) + "&file_name=" + fileName log.Info("return url=" + url) } else { - url, err = storage.ObsGenMultiPartSignedUrl(uuid, uploadID, partNumber, fileName) + url, err = storage.ObsGenMultiPartSignedUrl(strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/"), uploadID, partNumber) if err != nil { ctx.Error(500, fmt.Sprintf("ObsGenMultiPartSignedUrl failed: %v", err)) return @@ -823,7 +825,6 @@ func GetMultipartUploadUrl(ctx *context.Context) { log.Info("url=" + url) } } - ctx.JSON(200, map[string]string{ "url": url, }) @@ -855,13 +856,13 @@ func CompleteMultipart(ctx *context.Context) { } if typeCloudBrain == models.TypeCloudBrainOne { - _, err = storage.CompleteMultiPartUpload(uuid, uploadID, fileChunk.TotalChunks) + _, err = storage.CompleteMultiPartUpload(strings.TrimPrefix(path.Join(setting.Attachment.Minio.BasePath, path.Join(fileChunk.UUID[0:1], fileChunk.UUID[1:2], fileChunk.UUID)), "/"), uploadID, fileChunk.TotalChunks) if err != nil { ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err)) return } } else { - err = storage.CompleteObsMultiPartUpload(uuid, uploadID, fileName, fileChunk.TotalChunks) + err = storage.CompleteObsMultiPartUpload(strings.TrimPrefix(path.Join(setting.BasePath, path.Join(fileChunk.UUID[0:1], fileChunk.UUID[1:2], fileChunk.UUID, fileName)), "/"), uploadID, fileChunk.TotalChunks) if err != nil { ctx.Error(500, fmt.Sprintf("CompleteObsMultiPartUpload failed: %v", err)) return @@ -1013,7 +1014,7 @@ func queryDatasets(ctx *context.Context, attachs []*models.AttachmentUsername) { } for _, attch := range attachs { - has, err := storage.Attachments.HasObject(models.AttachmentRelativePath(attch.UUID)) + has, err := storage.Attachments.HasObject(setting.Attachment.Minio.BasePath + models.AttachmentRelativePath(attch.UUID)) if err != nil || !has { continue } diff --git a/routers/repo/attachment_model.go b/routers/repo/attachment_model.go new file mode 100644 index 000000000..efc7cbe08 --- /dev/null +++ b/routers/repo/attachment_model.go @@ -0,0 +1,323 @@ +package repo + +import ( + "fmt" + "path" + "strconv" + "strings" + + "code.gitea.io/gitea/models" + "code.gitea.io/gitea/modules/context" + "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/minio_ext" + "code.gitea.io/gitea/modules/setting" + "code.gitea.io/gitea/modules/storage" + "code.gitea.io/gitea/modules/upload" + gouuid "github.com/satori/go.uuid" +) + +func GetModelChunks(ctx *context.Context) { + fileMD5 := ctx.Query("md5") + typeCloudBrain := ctx.QueryInt("type") + fileName := ctx.Query("file_name") + scene := ctx.Query("scene") + modeluuid := ctx.Query("modeluuid") + log.Info("scene=" + scene + " typeCloudBrain=" + fmt.Sprint(typeCloudBrain)) + var chunks string + + err := checkTypeCloudBrain(typeCloudBrain) + if err != nil { + ctx.ServerError("checkTypeCloudBrain failed", err) + return + } + + fileChunk, err := models.GetModelFileChunkByMD5AndUser(fileMD5, ctx.User.ID, typeCloudBrain, modeluuid) + if err != 
nil {
+		if models.IsErrFileChunkNotExist(err) {
+			ctx.JSON(200, map[string]string{
+				"uuid":     "",
+				"uploaded": "0",
+				"uploadID": "",
+				"chunks":   "",
+			})
+		} else {
+			ctx.ServerError("GetFileChunkByMD5", err)
+		}
+		return
+	}
+
+	isExist := false
+	if typeCloudBrain == models.TypeCloudBrainOne {
+		isExist, err = storage.Attachments.HasObject(fileChunk.ObjectName)
+		if isExist {
+			log.Info("The file already exists in minio and has been uploaded. path=" + fileChunk.ObjectName)
+		} else {
+			log.Info("The file does not exist in minio.")
+		}
+		if err != nil {
+			ctx.ServerError("HasObject failed", err)
+			return
+		}
+	} else {
+		isExist, err = storage.ObsHasObject(fileChunk.ObjectName)
+		if isExist {
+			log.Info("The file already exists in obs and has been uploaded. path=" + fileChunk.ObjectName)
+		} else {
+			log.Info("The file does not exist in obs.")
+		}
+		if err != nil {
+			ctx.ServerError("ObsHasObject failed", err)
+			return
+		}
+	}
+
+	if isExist {
+		if fileChunk.IsUploaded == models.FileNotUploaded {
+			log.Info("the file has been uploaded but not recorded")
+			fileChunk.IsUploaded = models.FileUploaded
+			if err = models.UpdateModelFileChunk(fileChunk); err != nil {
+				log.Error("UpdateFileChunk failed:", err.Error())
+			}
+		}
+		modelname := ""
+		model, err := models.QueryModelById(modeluuid)
+		if err == nil && model != nil {
+			modelname = model.Name
+		}
+		ctx.JSON(200, map[string]string{
+			"uuid":      fileChunk.UUID,
+			"uploaded":  strconv.Itoa(fileChunk.IsUploaded),
+			"uploadID":  fileChunk.UploadID,
+			"chunks":    chunks,
+			"attachID":  "0",
+			"modeluuid": modeluuid,
+			"fileName":  fileName,
+			"modelName": modelname,
+		})
+	} else {
+		if fileChunk.IsUploaded == models.FileUploaded {
+			log.Info("the file has been recorded but not uploaded")
+			fileChunk.IsUploaded = models.FileNotUploaded
+			if err = models.UpdateModelFileChunk(fileChunk); err != nil {
+				log.Error("UpdateFileChunk failed:", err.Error())
+			}
+		}
+
+		if typeCloudBrain == models.TypeCloudBrainOne {
+			chunks, err = storage.GetPartInfos(fileChunk.ObjectName, fileChunk.UploadID)
+			if err != nil {
+				log.Error("GetPartInfos failed:%v", err.Error())
+			}
+		} else {
+			chunks, err = storage.GetObsPartInfos(fileChunk.ObjectName, fileChunk.UploadID)
+			if err != nil {
+				log.Error("GetObsPartInfos failed:%v", err.Error())
+			}
+		}
+		if err != nil {
+			models.DeleteModelFileChunk(fileChunk)
+			ctx.JSON(200, map[string]string{
+				"uuid":     "",
+				"uploaded": "0",
+				"uploadID": "",
+				"chunks":   "",
+			})
+		} else {
+			ctx.JSON(200, map[string]string{
+				"uuid":        fileChunk.UUID,
+				"uploaded":    strconv.Itoa(fileChunk.IsUploaded),
+				"uploadID":    fileChunk.UploadID,
+				"chunks":      chunks,
+				"attachID":    "0",
+				"datasetID":   "0",
+				"fileName":    "",
+				"datasetName": "",
+			})
+		}
+	}
+}
+
+func getObjectName(filename string, modeluuid string) string {
+	return strings.TrimPrefix(path.Join(Model_prefix, path.Join(modeluuid[0:1], modeluuid[1:2], modeluuid, filename)), "/")
+}
+
+func NewModelMultipart(ctx *context.Context) {
+	if !setting.Attachment.Enabled {
+		ctx.Error(404, "attachment is not enabled")
+		return
+	}
+	fileName := ctx.Query("file_name")
+	modeluuid := ctx.Query("modeluuid")
+
+	err := upload.VerifyFileType(ctx.Query("fileType"), strings.Split(setting.Attachment.AllowedTypes, ","))
+	if err != nil {
+		ctx.Error(400, err.Error())
+		return
+	}
+
+	typeCloudBrain := ctx.QueryInt("type")
+	err = checkTypeCloudBrain(typeCloudBrain)
+	if err != nil {
+		ctx.ServerError("checkTypeCloudBrain failed", err)
+		return
+	}
+
+	if setting.Attachment.StoreType == storage.MinioStorageType {
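+		// The checks below enforce the MinIO multipart limits defined in
+		// modules/minio_ext: at most MaxPartsCount chunks and no more than
+		// MaxMultipartPutObjectSize bytes in total; violations return HTTP 400.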
totalChunkCounts := ctx.QueryInt("totalChunkCounts") + if totalChunkCounts > minio_ext.MaxPartsCount { + ctx.Error(400, fmt.Sprintf("chunk counts(%d) is too much", totalChunkCounts)) + return + } + + fileSize := ctx.QueryInt64("size") + if fileSize > minio_ext.MaxMultipartPutObjectSize { + ctx.Error(400, fmt.Sprintf("file size(%d) is too big", fileSize)) + return + } + + uuid := gouuid.NewV4().String() + var uploadID string + var objectName string + if typeCloudBrain == models.TypeCloudBrainOne { + objectName = strings.TrimPrefix(path.Join(Model_prefix, path.Join(modeluuid[0:1], modeluuid[1:2], modeluuid, fileName)), "/") + uploadID, err = storage.NewMultiPartUpload(objectName) + if err != nil { + ctx.ServerError("NewMultipart", err) + return + } + } else { + + objectName = strings.TrimPrefix(path.Join(Model_prefix, path.Join(modeluuid[0:1], modeluuid[1:2], modeluuid, fileName)), "/") + uploadID, err = storage.NewObsMultiPartUpload(objectName) + if err != nil { + ctx.ServerError("NewObsMultiPartUpload", err) + return + } + } + + _, err = models.InsertModelFileChunk(&models.ModelFileChunk{ + UUID: uuid, + UserID: ctx.User.ID, + UploadID: uploadID, + Md5: ctx.Query("md5"), + Size: fileSize, + ObjectName: objectName, + ModelUUID: modeluuid, + TotalChunks: totalChunkCounts, + Type: typeCloudBrain, + }) + + if err != nil { + ctx.Error(500, fmt.Sprintf("InsertFileChunk: %v", err)) + return + } + + ctx.JSON(200, map[string]string{ + "uuid": uuid, + "uploadID": uploadID, + }) + } else { + ctx.Error(404, "storage type is not enabled") + return + } +} + +func GetModelMultipartUploadUrl(ctx *context.Context) { + uuid := ctx.Query("uuid") + uploadID := ctx.Query("uploadID") + partNumber := ctx.QueryInt("chunkNumber") + size := ctx.QueryInt64("size") + typeCloudBrain := ctx.QueryInt("type") + err := checkTypeCloudBrain(typeCloudBrain) + if err != nil { + ctx.ServerError("checkTypeCloudBrain failed", err) + return + } + fileChunk, err := models.GetModelFileChunkByUUID(uuid) + if err != nil { + if models.IsErrFileChunkNotExist(err) { + ctx.Error(404) + } else { + ctx.ServerError("GetFileChunkByUUID", err) + } + return + } + url := "" + if typeCloudBrain == models.TypeCloudBrainOne { + if size > minio_ext.MinPartSize { + ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size)) + return + } + url, err = storage.GenMultiPartSignedUrl(fileChunk.ObjectName, uploadID, partNumber, size) + if err != nil { + ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err)) + return + } + } else { + url, err = storage.ObsGenMultiPartSignedUrl(fileChunk.ObjectName, uploadID, partNumber) + if err != nil { + ctx.Error(500, fmt.Sprintf("ObsGenMultiPartSignedUrl failed: %v", err)) + return + } + log.Info("url=" + url) + + } + + ctx.JSON(200, map[string]string{ + "url": url, + }) +} + +func CompleteModelMultipart(ctx *context.Context) { + uuid := ctx.Query("uuid") + uploadID := ctx.Query("uploadID") + typeCloudBrain := ctx.QueryInt("type") + modeluuid := ctx.Query("modeluuid") + log.Warn("uuid:" + uuid) + log.Warn("modeluuid:" + modeluuid) + log.Warn("typeCloudBrain:" + strconv.Itoa(typeCloudBrain)) + + err := checkTypeCloudBrain(typeCloudBrain) + if err != nil { + ctx.ServerError("checkTypeCloudBrain failed", err) + return + } + fileChunk, err := models.GetModelFileChunkByUUID(uuid) + if err != nil { + if models.IsErrFileChunkNotExist(err) { + ctx.Error(404) + } else { + ctx.ServerError("GetFileChunkByUUID", err) + } + return + } + + if typeCloudBrain == models.TypeCloudBrainOne { + _, err = 
storage.CompleteMultiPartUpload(fileChunk.ObjectName, uploadID, fileChunk.TotalChunks)
+		if err != nil {
+			ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err))
+			return
+		}
+	} else {
+		err = storage.CompleteObsMultiPartUpload(fileChunk.ObjectName, uploadID, fileChunk.TotalChunks)
+		if err != nil {
+			ctx.Error(500, fmt.Sprintf("CompleteObsMultiPartUpload failed: %v", err))
+			return
+		}
+	}
+
+	fileChunk.IsUploaded = models.FileUploaded
+
+	err = models.UpdateModelFileChunk(fileChunk)
+	if err != nil {
+		ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
+		return
+	}
+	//update the model size information
+	UpdateModelSize(modeluuid)
+
+	ctx.JSON(200, map[string]string{
+		"result_code": "0",
+	})
+
+}
diff --git a/routers/repo/grampus.go b/routers/repo/grampus.go
index 268830dc8..de7bb454d 100755
--- a/routers/repo/grampus.go
+++ b/routers/repo/grampus.go
@@ -940,15 +940,14 @@ func GrampusGetLog(ctx *context.Context) {
 	content, err := grampus.GetTrainJobLog(job.JobID)
 	if err != nil {
 		log.Error("GetTrainJobLog failed: %v", err, ctx.Data["MsgID"])
-		ctx.ServerError(err.Error(), err)
+		ctx.JSON(http.StatusOK, map[string]interface{}{
+			"JobName":        job.JobName,
+			"Content":        "",
+			"CanLogDownload": false,
+		})
 		return
 	}
-	var canLogDownload bool
-	if err != nil {
-		canLogDownload = false
-	} else {
-		canLogDownload = true
-	}
+	canLogDownload := err == nil && job.IsUserHasRight(ctx.User)
 	ctx.JSON(http.StatusOK, map[string]interface{}{
 		"JobName": job.JobName,
 		"Content": content,
@@ -958,6 +957,28 @@ func GrampusGetLog(ctx *context.Context) {
 	return
 }
 
+func GrampusMetrics(ctx *context.Context) {
+	jobID := ctx.Params(":jobid")
+	job, err := models.GetCloudbrainByJobID(jobID)
+	if err != nil {
+		log.Error("GetCloudbrainByJobID failed: %v", err, ctx.Data["MsgID"])
+		ctx.ServerError(err.Error(), err)
+		return
+	}
+
+	result, err := grampus.GetGrampusMetrics(job.JobID)
+	if err != nil {
+		log.Error("GetGrampusMetrics failed: %v", err, ctx.Data["MsgID"])
+	}
+	ctx.JSON(http.StatusOK, map[string]interface{}{
+		"JobID":       jobID,
+		"Interval":    result.Interval,
+		"MetricsInfo": result.MetricsInfo,
+	})
+
+	return
+}
+
 func generateCommand(repoName, processorType, codeRemotePath, dataRemotePath, bootFile, paramSrc, outputRemotePath, datasetName, pretrainModelPath, pretrainModelFileName, modelRemoteObsUrl string) (string, error) {
 	var command string
diff --git a/routers/routes/routes.go b/routers/routes/routes.go
index 9d19d6c5e..2b361b507 100755
--- a/routers/routes/routes.go
+++ b/routers/routes/routes.go
@@ -729,6 +729,13 @@ func RegisterRoutes(m *macaron.Macaron) {
 		m.Post("/complete_multipart", repo.CompleteMultipart)
 	})
 
+	m.Group("/attachments/model", func() {
+		m.Get("/get_chunks", repo.GetModelChunks)
+		m.Get("/new_multipart", repo.NewModelMultipart)
+		m.Get("/get_multipart_url", repo.GetModelMultipartUploadUrl)
+		m.Post("/complete_multipart", repo.CompleteModelMultipart)
+	})
+
 	m.Group("/attachments", func() {
 		m.Get("/public/query", repo.QueryAllPublicDataset)
 		m.Get("/private/:username", repo.QueryPrivateDataset)
@@ -1229,6 +1236,12 @@ func RegisterRoutes(m *macaron.Macaron) {
 			})
 		}, context.RepoRef())
 		m.Group("/modelmanage", func() {
+			m.Get("/create_local_model_1", repo.CreateLocalModel)
+			m.Get("/create_local_model_2", repo.CreateLocalModelForUpload)
+			m.Get("/create_online_model", repo.CreateOnlineModel)
+			m.Post("/create_local_model", repo.SaveLocalModel)
+			m.Delete("/delete_model_file", repo.DeleteModelFile)
+
 			m.Post("/create_model", repo.SaveModel)
 			m.Post("/create_model_convert", reqWechatBind, reqRepoModelManageWriter, repo.SaveModelConvert)
 			m.Post("/create_new_model", repo.SaveNewNameModel)
diff --git a/services/cloudbrain/cloudbrainTask/sync_status.go b/services/cloudbrain/cloudbrainTask/sync_status.go
index 7153a7ec0..67dc4d3b7 100644
--- a/services/cloudbrain/cloudbrainTask/sync_status.go
+++ b/services/cloudbrain/cloudbrainTask/sync_status.go
@@ -14,7 +14,7 @@ import (
 var noteBookOKMap = make(map[int64]int, 20)
 
-//if a task notebook url can get two times, the notebook can browser.
-const successfulCount = 2
+//if a task notebook url can be fetched successfulCount times in a row, the notebook can be browsed.
+const successfulCount = 3
 
 func SyncCloudBrainOneStatus(task *models.Cloudbrain) (*models.Cloudbrain, error) {
 	jobResult, err := cloudbrain.GetJob(task.JobID)
diff --git a/services/cloudbrain/resource/resource_specification.go b/services/cloudbrain/resource/resource_specification.go
index d23fd3aad..8f4182d87 100644
--- a/services/cloudbrain/resource/resource_specification.go
+++ b/services/cloudbrain/resource/resource_specification.go
@@ -138,6 +138,7 @@ func GetResourceSpecificationList(opts models.SearchResourceSpecificationOptions
 func GetAllDistinctResourceSpecification(opts models.SearchResourceSpecificationOptions) (*models.ResourceSpecAndQueueListRes, error) {
 	opts.Page = 0
 	opts.PageSize = 1000
+	opts.OrderBy = models.SearchSpecOrder4Standard
 	_, r, err := models.SearchResourceSpecification(opts)
 	if err != nil {
 		return nil, err
diff --git a/templates/base/footer_content.tmpl b/templates/base/footer_content.tmpl
index cb732bbbe..b4c8518c4 100755
--- a/templates/base/footer_content.tmpl
+++ b/templates/base/footer_content.tmpl
@@ -24,11 +24,30 @@
{{.LangName}}
- + {{.i18n.Tr "custom.Platform_Tutorial"}} {{if .EnableSwagger}} API{{end}} {{if .IsSigned}} diff --git a/templates/base/footer_content_fluid.tmpl b/templates/base/footer_content_fluid.tmpl index 723c78045..be17f2781 100755 --- a/templates/base/footer_content_fluid.tmpl +++ b/templates/base/footer_content_fluid.tmpl @@ -22,10 +22,30 @@
{{.LangName}}
+ {{.i18n.Tr "custom.Platform_Tutorial"}} {{if .EnableSwagger}} API{{end}} {{if .IsSigned}} diff --git a/templates/repo/cloudbrain/trainjob/show.tmpl b/templates/repo/cloudbrain/trainjob/show.tmpl index 7bc3f2c82..8193e80fc 100644 --- a/templates/repo/cloudbrain/trainjob/show.tmpl +++ b/templates/repo/cloudbrain/trainjob/show.tmpl @@ -284,10 +284,7 @@
- +{{template "base/footer" .}} diff --git a/templates/repo/modelmanage/create_local_2.tmpl b/templates/repo/modelmanage/create_local_2.tmpl new file mode 100644 index 000000000..5780c6194 --- /dev/null +++ b/templates/repo/modelmanage/create_local_2.tmpl @@ -0,0 +1,11 @@ +{{template "base/head" .}} + +
+ {{template "repo/header" .}} + +
+ +{{template "base/footer" .}} diff --git a/templates/repo/modelmanage/create_online.tmpl b/templates/repo/modelmanage/create_online.tmpl new file mode 100644 index 000000000..32503d1f0 --- /dev/null +++ b/templates/repo/modelmanage/create_online.tmpl @@ -0,0 +1,581 @@ +{{template "base/head" .}} + + +
+{{$repository := .Repository.ID}} +
+ {{template "repo/header" .}} +
+
+
+
+

{{.i18n.Tr "repo.model.manage.import_online_model"}}

+
+
+
+ +
+ +
+
+ +
+
+ + +   + +
+
+
+
+ +
+
+ +
+
+ +
+
+ +
+ +
+
+
+ +
+
+ + +
+
+
+
+ +
+
+ +
+
+
+
+ +
+
+ +
+
+
+
+ + +
+
+
+
+
+
+{{template "base/footer" .}} + + + diff --git a/templates/repo/modelmanage/index.tmpl b/templates/repo/modelmanage/index.tmpl index 6a42d96f7..b358384e3 100644 --- a/templates/repo/modelmanage/index.tmpl +++ b/templates/repo/modelmanage/index.tmpl @@ -25,6 +25,23 @@ border-bottom-left-radius: 4px; box-shadow: 0 2px 3px 0 rgb(34 36 38 / 15%); } + .m-blue-btn { + background-color: rgb(22, 132, 252) !important; + } + .m-blue-btn:hover { + background-color: #66b1ff !important; + color: #fff; + } + + .m-blue-btn:focus { + background-color: #66b1ff !important; + color: #fff; + } + + .m-blue-btn:active { + background-color: #3a8ee6 !important; + color: #fff; + } @@ -57,8 +74,10 @@
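+{{/* The two buttons added below open the new import pages: create_local_model_1 for uploading a local model and create_online_model for registering a model produced by a training task (both routes are registered in routers/routes/routes.go). */}}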
+ {{$.i18n.Tr "repo.model.manage.import_local_model"}} {{$.i18n.Tr "repo.model.manage.import_new_model"}} + href="{{.RepoLink}}/modelmanage/create_online_model">{{$.i18n.Tr "repo.model.manage.import_online_model"}}
{{if eq $.MODEL_COUNT 0}} @@ -66,6 +85,7 @@
{{$.i18n.Tr "repo.model.manage.notcreatemodel"}}
+ +
{{$.i18n.Tr "repo.model.manage.createmodel_tip"}} {{$.i18n.Tr "repo.model.manage.createtrainjob"}}
{{$.i18n.Tr "repo.platform_instructions1"}} {{$.i18n.Tr "repo.platform_instructions2"}} {{$.i18n.Tr "repo.platform_instructions3"}}
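+{{/* A locally imported model may carry no training-task VersionName, hence the "V0001" fallback added in the script hunk below. */}}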
@@ -421,7 +443,8 @@ let train_html = ''; modelData = data; for (let i = 0; i < n_length; i++) { - train_html += `
${data[i].VersionName}
` + var VersionName = data[i].VersionName || 'V0001'; + train_html += `
${VersionName}
` train_html += '' } if (data.length) { @@ -568,5 +591,4 @@ $("#choice_Engine").removeClass('disabled'); } } - \ No newline at end of file diff --git a/templates/repo/modelmanage/showinfo.tmpl b/templates/repo/modelmanage/showinfo.tmpl index 0a29375f1..1b153bb45 100644 --- a/templates/repo/modelmanage/showinfo.tmpl +++ b/templates/repo/modelmanage/showinfo.tmpl @@ -1,533 +1,10 @@ {{template "base/head" .}} -
+ +
{{template "repo/header" .}} -
-

- - - -

-
- -
-
- {{$.i18n.Tr "repo.model.manage.baseinfo"}} - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
{{$.i18n.Tr "repo.model.manage.model_name"}}
{{$.i18n.Tr "repo.model.manage.version"}}
{{$.i18n.Tr "repo.migrate_items_labels"}} -
- -
- - -
{{$.i18n.Tr "repo.modelarts.model_size"}}
{{$.i18n.Tr "repo.modelarts.createtime"}}
{{$.i18n.Tr "repo.model.manage.description"}} -
- - -
-
{{$.i18n.Tr "repo.modelarts.train_job"}} - - - -
{{$.i18n.Tr "repo.modelarts.code_version"}}
{{$.i18n.Tr "repo.modelarts.train_job.start_file"}}
{{$.i18n.Tr "repo.modelarts.train_job.train_dataset"}}
{{$.i18n.Tr "repo.modelarts.train_job.run_parameter"}}
{{$.i18n.Tr "repo.modelarts.train_job.AI_Engine"}}
{{$.i18n.Tr "repo.modelarts.train_job.standard"}}
{{$.i18n.Tr "repo.modelarts.train_job.compute_node"}}
-
-
- {{$.i18n.Tr "repo.model.manage.model_accuracy"}} - - - - - - - - - - - - - - - - - - - -
{{$.i18n.Tr "repo.model.manage.Accuracy"}}
F1
{{$.i18n.Tr "repo.model.manage.Precision"}}
{{$.i18n.Tr "repo.model.manage.Recall"}}
-
-
-
-
- - - -
- -
-
-
-
+
+
+ {{template "base/footer" .}} - \ No newline at end of file diff --git a/web_src/js/components/Model.vue b/web_src/js/components/Model.vue index 7362246c4..02b8643ae 100644 --- a/web_src/js/components/Model.vue +++ b/web_src/js/components/Model.vue @@ -16,16 +16,17 @@ prop="name" :label="i18n.model_name" align="left" - min-width="17%" + min-width="20%" >
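The sketch below shows how a client could drive the new `/attachments/model` endpoints registered in routers/routes/routes.go. The route paths, query parameters, and JSON keys come from the handlers in this patch; the base URL, chunk size, `fileType` value, and the `type=0` (CloudBrain one / MinIO) branch are assumptions, and error handling is minimal. A real client would normally call `get_chunks` first to resume an interrupted upload.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

const (
	base      = "https://example.com" // assumed server address
	chunkSize = 4 * 1024 * 1024       // assumed; must satisfy the server's per-chunk limits
)

// getJSON issues a GET request and decodes the JSON object in the response.
func getJSON(u string, out interface{}) error {
	resp, err := http.Get(u)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	return json.NewDecoder(resp.Body).Decode(out)
}

// uploadModelFile sketches the chunked upload flow; modelUUID is the id
// returned by create_local_model, md5 is the file's checksum.
func uploadModelFile(modelUUID, fileName, md5 string, data []byte) error {
	totalChunks := (len(data) + chunkSize - 1) / chunkSize

	// 1. Register the upload; the server derives the MinIO/OBS object name
	//    from the model uuid and file name (see NewModelMultipart).
	var mp struct{ UUID, UploadID string }
	q := url.Values{
		"file_name": {fileName}, "modeluuid": {modelUUID}, "md5": {md5},
		"size": {fmt.Sprint(len(data))}, "fileType": {"model"}, // fileType value is an assumption
		"type": {"0"}, "totalChunkCounts": {fmt.Sprint(totalChunks)},
	}
	if err := getJSON(base+"/attachments/model/new_multipart?"+q.Encode(), &mp); err != nil {
		return err
	}

	// 2. Upload every chunk through a presigned URL.
	for i, off := 1, 0; off < len(data); i, off = i+1, off+chunkSize {
		end := off + chunkSize
		if end > len(data) {
			end = len(data)
		}
		var part struct {
			URL string `json:"url"`
		}
		pq := url.Values{
			"uuid": {mp.UUID}, "uploadID": {mp.UploadID},
			"chunkNumber": {fmt.Sprint(i)}, "size": {fmt.Sprint(end - off)}, "type": {"0"},
		}
		if err := getJSON(base+"/attachments/model/get_multipart_url?"+pq.Encode(), &part); err != nil {
			return err
		}
		req, err := http.NewRequest(http.MethodPut, part.URL, bytes.NewReader(data[off:end]))
		if err != nil {
			return err
		}
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return err
		}
		resp.Body.Close()
	}

	// 3. Assemble the parts; CompleteModelMultipart also triggers UpdateModelSize.
	cq := url.Values{
		"uuid": {mp.UUID}, "uploadID": {mp.UploadID},
		"type": {"0"}, "modeluuid": {modelUUID},
	}
	resp, err := http.Post(base+"/attachments/model/complete_multipart?"+cq.Encode(), "application/x-www-form-urlencoded", nil)
	if err != nil {
		return err
	}
	resp.Body.Close()
	return nil
}

func main() {
	data := []byte("dummy model weights") // placeholder payload
	// The model uuid and checksum below are placeholders for illustration only.
	if err := uploadModelFile("0123456789abcdef", "model.bin", "d41d8cd98f00b204e9800998ecf8427e", data); err != nil {
		fmt.Println("upload failed:", err)
	}
}
```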