diff --git a/options/locale/locale_en-US.ini b/options/locale/locale_en-US.ini
index bb9edf61e..5eac4cf2e 100755
--- a/options/locale/locale_en-US.ini
+++ b/options/locale/locale_en-US.ini
@@ -959,6 +959,7 @@ unfavorite=Unlike
favorite=Like
disassociate=Disassociate
benchmark_dataset_tip=Note: first use the dataset function to upload the model, and then select the model from the dataset list.
+file_deleted=The file has been deleted
[repo]
owner = Owner
@@ -1221,6 +1222,7 @@ model_Evaluation_not_created = Model evaluation has not been created
repo_not_initialized = Code version: You have not initialized the code repository, please initialized first ;
debug_task_running_limit =Running time: no more than 4 hours, it will automatically stop if it exceeds 4 hours;
dataset_desc = Dataset: Cloud Brain 1 provides CPU/GPU,Cloud Brain 2 provides Ascend NPU.And dataset also needs to be uploaded to the corresponding environment;
+platform_instructions = Instructions for use: You can refer to the OpenI_Learning course of the Qizhi AI collaboration platform.
platform_instructions1 = Instructions for use: You can refer to the
platform_instructions2 = OpenI_Learning
platform_instructions3 = course of Openi AI collaboration platform.
diff --git a/options/locale/locale_zh-CN.ini b/options/locale/locale_zh-CN.ini
index 527df52d5..2fbd3ab52 100755
--- a/options/locale/locale_zh-CN.ini
+++ b/options/locale/locale_zh-CN.ini
@@ -965,6 +965,7 @@ unfavorite=取消收藏
favorite=收藏
disassociate=取消关联
benchmark_dataset_tip=说明:先使用数据集功能上传模型,然后从数据集列表选模型。
+file_deleted=文件已经被删除
[repo]
owner=拥有者
@@ -1234,6 +1235,7 @@ model_Evaluation_not_created = 未创建过评测任务
repo_not_initialized = 代码版本:您还没有初始化代码仓库,请先创建代码版本;
debug_task_running_limit = 运行时长:最长不超过4个小时,超过4个小时将自动停止;
dataset_desc = 数据集:云脑1提供 CPU / GPU 资源,云脑2提供 Ascend NPU 资源,调试使用的数据集也需要上传到对应的环境;
+platform_instructions = 使用说明:可以参考启智AI协作平台小白训练营课程。
platform_instructions1 = 使用说明:可以参考启智AI协作平台
platform_instructions2 = 小白训练营课程
platform_instructions3 = 。
diff --git a/routers/repo/cloudbrain.go b/routers/repo/cloudbrain.go
index c1e89dde5..181ad6302 100755
--- a/routers/repo/cloudbrain.go
+++ b/routers/repo/cloudbrain.go
@@ -928,7 +928,7 @@ func cloudBrainShow(ctx *context.Context, tpName base.TplName, jobType models.Jo
}
}
- ctx.Data["datasetDownload"] = GetCloudBrainDataSetInfo(task.Uuid, false)
+ ctx.Data["datasetDownload"] = GetCloudBrainDataSetInfo(task.Uuid, task.DatasetName, false, ctx)
ctx.Data["task"] = task
labelName := strings.Fields(task.LabelName)
ctx.Data["LabelName"] = labelName
diff --git a/routers/repo/grampus.go b/routers/repo/grampus.go
index 33e111df2..3b19d9344 100755
--- a/routers/repo/grampus.go
+++ b/routers/repo/grampus.go
@@ -713,7 +713,7 @@ func GrampusTrainJobShow(ctx *context.Context) {
taskList := make([]*models.Cloudbrain, 0)
taskList = append(taskList, task)
ctx.Data["version_list_task"] = taskList
- ctx.Data["datasetDownload"] = GetCloudBrainDataSetInfo(task.Uuid, false)
+ ctx.Data["datasetDownload"] = GetCloudBrainDataSetInfo(task.Uuid, task.DatasetName, false, ctx)
ctx.Data["canDownload"] = cloudbrain.CanModifyJob(ctx, task)
ctx.Data["displayJobName"] = task.DisplayJobName
diff --git a/routers/repo/modelarts.go b/routers/repo/modelarts.go
index 40e8076fb..a4ce70b58 100755
--- a/routers/repo/modelarts.go
+++ b/routers/repo/modelarts.go
@@ -285,7 +285,7 @@ func NotebookShow(ctx *context.Context) {
datasetDownload := make([]models.DatasetDownload, 0)
if ctx.IsSigned {
if task.Uuid != "" && task.UserID == ctx.User.ID {
- datasetDownload = GetCloudBrainDataSetInfo(task.Uuid, true)
+ datasetDownload = GetCloudBrainDataSetInfo(task.Uuid, task.DatasetName, true, ctx)
}
}
user, err := models.GetUserByID(task.UserID)
@@ -331,34 +331,40 @@ func NotebookShow(ctx *context.Context) {
ctx.HTML(200, tplModelArtsNotebookShow)
}
-func GetCloudBrainDataSetInfo(uuid string, isNeedDown bool) []models.DatasetDownload {
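+// GetCloudBrainDataSetInfo builds the dataset download list from the semicolon-separated
+// uuid and dataset name strings; the name list supplies a fallback label when an attachment
+// has been deleted, and ctx is used to localize that label.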
+func GetCloudBrainDataSetInfo(uuid string, datasetname string, isNeedDown bool, ctx *context.Context) []models.DatasetDownload {
datasetDownload := make([]models.DatasetDownload, 0)
uuidList := strings.Split(uuid, ";")
- for _, uuidStr := range uuidList {
+ datasetnameList := strings.Split(datasetname, ";")
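+ // each uuid pairs with the dataset name at the same index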
+ for i, uuidStr := range uuidList {
+ name := ""
+ link := ""
attachment, err := models.GetAttachmentByUUID(uuidStr)
if err != nil {
log.Error("GetAttachmentByUUID failed:%v", err.Error())
- return datasetDownload
- }
- dataset, err := models.GetDatasetByID(attachment.DatasetID)
- if err != nil {
- log.Error("GetDatasetByID failed:%v", err.Error())
- return datasetDownload
- }
- repo, err := models.GetRepositoryByID(dataset.RepoID)
- if err != nil {
- log.Error("GetRepositoryByID failed:%v", err.Error())
- return datasetDownload
+ // guard against a dataset name list that is shorter than the uuid list
+ if i < len(datasetnameList) {
+ name = datasetnameList[i] + "(" + ctx.Tr("dataset.file_deleted") + ")"
+ } else {
+ name = ctx.Tr("dataset.file_deleted")
+ }
+ } else {
+ name = attachment.Name
+ dataset, err := models.GetDatasetByID(attachment.DatasetID)
+ if err != nil {
+ log.Error("GetDatasetByID failed:%v", err.Error())
+ } else {
+ repo, err := models.GetRepositoryByID(dataset.RepoID)
+ if err != nil {
+ log.Error("GetRepositoryByID failed:%v", err.Error())
+ } else {
+ link = repo.Link() + "/datasets"
+ }
+ }
}
url := ""
if isNeedDown {
url = attachment.S3DownloadURL()
}
datasetDownload = append(datasetDownload, models.DatasetDownload{
- DatasetName: attachment.Name,
+ DatasetName: name,
DatasetDownloadLink: url,
- RepositoryLink: repo.Link() + "/datasets",
+ RepositoryLink: link,
})
}
return datasetDownload
@@ -1810,7 +1816,7 @@ func TrainJobShow(ctx *context.Context) {
} else {
VersionListTasks[i].Parameters = ""
}
- datasetList = append(datasetList, GetCloudBrainDataSetInfo(task.Uuid, false))
+ datasetList = append(datasetList, GetCloudBrainDataSetInfo(task.Uuid, task.DatasetName, false, ctx))
VersionListTasks[i].CanDel = cloudbrain.CanDeleteJob(ctx, &task.Cloudbrain)
VersionListTasks[i].CanModify = cloudbrain.CanModifyJob(ctx, &task.Cloudbrain)
}
@@ -2526,7 +2532,7 @@ func InferenceJobShow(ctx *context.Context) {
ctx.Data["displayJobName"] = task.DisplayJobName
ctx.Data["task"] = task
ctx.Data["canDownload"] = cloudbrain.CanModifyJob(ctx, task)
- ctx.Data["datasetDownload"] = GetCloudBrainDataSetInfo(task.Uuid, false)
+ ctx.Data["datasetDownload"] = GetCloudBrainDataSetInfo(task.Uuid, task.DatasetName, false, ctx)
tempUids := []int64{}
tempUids = append(tempUids, task.UserID)
JobCreater, err := models.GetUserNamesByIDs(tempUids)