
Merge pull request 'modelarts' (#160) from modelarts into develop

Reviewed-by: stardust <denglf@pcl.ac.cn>
yuyuanshifu committed 4 years ago (parent commit d51c701628)
55 changed files with 13026 additions and 109 deletions
  1. custom/conf/app.ini.sample (+16, -0)
  2. models/attachment.go (+16, -21)
  3. models/cloudbrain.go (+200, -0)
  4. models/dataset.go (+4, -3)
  5. models/file_chunk.go (+12, -6)
  6. models/user.go (+2, -2)
  7. modules/APIGW-go-sdk-2.0.2/core/escape.go (+42, -0)
  8. modules/APIGW-go-sdk-2.0.2/core/signer.go (+208, -0)
  9. modules/auth/cloudbrain.go (+0, -1)
  10. modules/auth/modelarts.go (+16, -0)
  11. modules/cloudbrain/cloudbrain.go (+1, -0)
  12. modules/modelarts/modelarts.go (+65, -0)
  13. modules/modelarts/resty.go (+288, -0)
  14. modules/obs/auth.go (+466, -0)
  15. modules/obs/client.go (+1307, -0)
  16. modules/obs/conf.go (+471, -0)
  17. modules/obs/const.go (+932, -0)
  18. modules/obs/convert.go (+880, -0)
  19. modules/obs/error.go (+35, -0)
  20. modules/obs/extension.go (+37, -0)
  21. modules/obs/http.go (+566, -0)
  22. modules/obs/log.go (+317, -0)
  23. modules/obs/model.go (+1236, -0)
  24. modules/obs/pool.go (+543, -0)
  25. modules/obs/temporary.go (+895, -0)
  26. modules/obs/trait.go (+909, -0)
  27. modules/obs/transfer.go (+873, -0)
  28. modules/obs/util.go (+536, -0)
  29. modules/setting/setting.go (+35, -0)
  30. modules/storage/obs.go (+175, -0)
  31. modules/storage/storage.go (+9, -0)
  32. options/locale/locale_zh-CN.ini (+6, -1)
  33. package-lock.json (+165, -9)
  34. package.json (+3, -0)
  35. routers/api/v1/api.go (+3, -0)
  36. routers/api/v1/repo/modelarts.go (+45, -0)
  37. routers/repo/attachment.go (+221, -43)
  38. routers/repo/cloudbrain.go (+1, -1)
  39. routers/repo/dataset.go (+2, -1)
  40. routers/repo/modelarts.go (+247, -0)
  41. routers/routes/routes.go (+14, -0)
  42. templates/base/head.tmpl (+1, -0)
  43. templates/explore/dataset_list.tmpl (+0, -2)
  44. templates/repo/cloudbrain/index.tmpl (+1, -1)
  45. templates/repo/datasets/dataset.tmpl (+14, -2)
  46. templates/repo/datasets/dataset_list.tmpl (+2, -2)
  47. templates/repo/datasets/index.tmpl (+14, -7)
  48. templates/repo/header.tmpl (+88, -4)
  49. templates/repo/modelarts/index.tmpl (+479, -0)
  50. templates/repo/modelarts/new.tmpl (+184, -0)
  51. templates/repo/modelarts/show.tmpl (+122, -0)
  52. web_src/js/components/MinioUploader.vue (+3, -2)
  53. web_src/js/components/ObsUploader.vue (+298, -0)
  54. web_src/js/index.js (+17, -0)
  55. webpack.config.js (+4, -1)

custom/conf/app.ini.sample (+16, -0)

@@ -1069,3 +1069,19 @@ PASSWORD = 4BPmgvK2hb2Eywwyp4YZRY4B7yQf4DAC
 [blockchain]
 HOST = http://192.168.207.84:3002/
 COMMIT_VALID_DATE = 2021-01-15
+
+[obs]
+ENDPOINT = https://obs.cn-south-222.ai.pcl.cn
+ACCESS_KEY_ID = FDP3LRMHLB9S77VWEHE3
+SECRET_ACCESS_KEY = LyM82Wk80pgjhs2z7AdDcsdpCWhbsJtSzQ7hkESN
+BUCKET = testopendata
+LOCATION = cn-south-222
+BASE_PATH = attachment/
+
+[modelarts]
+ENDPOINT = https://modelarts.cn-south-222.ai.pcl.cn
+PROJECT_ID = edfccf24aace4e17a56da6bcbb55a5aa
+PROJECT_NAME = cn-south-222_test
+USERNAME = test1
+PASSWORD = Qizhi@test.
+DOMAIN = cn-south-222

models/attachment.go (+16, -21)

@@ -41,6 +41,7 @@ type Attachment struct {
 	Size            int64              `xorm:"DEFAULT 0"`
 	IsPrivate       bool               `xorm:"DEFAULT false"`
 	DecompressState int32              `xorm:"DEFAULT 0"`
+	Type            int                `xorm:"DEFAULT 0"`
 	CreatedUnix     timeutil.TimeStamp `xorm:"created"`
 }

@@ -350,7 +351,7 @@ func GetUnDecompressAttachments() ([]*Attachment, error) {

 func getUnDecompressAttachments(e Engine) ([]*Attachment, error) {
 	attachments := make([]*Attachment, 0, 10)
-	return attachments, e.Where("decompress_state = ? and dataset_id != 0 and name like '%.zip'", DecompressStateInit).Find(&attachments)
+	return attachments, e.Where("decompress_state = ? and dataset_id != 0 and attachment.type = ? and name like '%.zip'", DecompressStateInit, TypeCloudBrainOne).Find(&attachments)
 }

 func GetAllPublicAttachments() ([]*AttachmentUsername, error) {
@@ -360,7 +361,7 @@ func GetAllPublicAttachments() ([]*AttachmentUsername, error) {
 func getAllPublicAttachments(e Engine) ([]*AttachmentUsername, error) {
 	attachments := make([]*AttachmentUsername, 0, 10)
 	if err := e.Table("attachment").Join("LEFT", "`user`", "attachment.uploader_id "+
-		"= `user`.id").Where("decompress_state= ? and is_private= ?", DecompressStateDone, false).Find(&attachments); err != nil {
+		"= `user`.id").Where("decompress_state= ? and is_private= ? and attachment.type = ?", DecompressStateDone, false, TypeCloudBrainOne).Find(&attachments); err != nil {
 		return nil, err
 	}
 	return attachments, nil
@@ -378,40 +379,34 @@ func GetPrivateAttachments(username string) ([]*AttachmentUsername, error) {
 func getPrivateAttachments(e Engine, userID int64) ([]*AttachmentUsername, error) {
 	attachments := make([]*AttachmentUsername, 0, 10)
 	if err := e.Table("attachment").Join("LEFT", "`user`", "attachment.uploader_id "+
-		"= `user`.id").Where("decompress_state= ? and uploader_id= ?", DecompressStateDone, userID).Find(&attachments); err != nil {
+		"= `user`.id").Where("decompress_state= ? and uploader_id= ? and attachment.type = ?", DecompressStateDone, userID, TypeCloudBrainOne).Find(&attachments); err != nil {
 		return nil, err
 	}
 	return attachments, nil
 }

-/*
-func GetAllUserAttachments(userID int64) ([]*AttachmentUsername, error) {
-	attachsPub, err := getAllPublicAttachments(x)
-	if err != nil {
-		log.Error("getAllPublicAttachments failed:%v", err)
-		return nil, err
-	}
-
-	attachsPri, err := getPrivateAttachments(x, userID)
-	if err != nil {
-		log.Error("getPrivateAttachments failed:%v", err)
+func getAllUserAttachments(e Engine, userID int64) ([]*AttachmentUsername, error) {
+	attachments := make([]*AttachmentUsername, 0, 10)
+	if err := e.Table("attachment").Join("LEFT", "`user`", "attachment.uploader_id "+
+		"= `user`.id").Where("decompress_state= ? and attachment.type = ? and (uploader_id= ? or is_private = ?)", DecompressStateDone, TypeCloudBrainOne, userID, false).Find(&attachments); err != nil {
 		return nil, err
 	}
-
-	return append(attachsPub, attachsPri...), nil
+	return attachments, nil
 }

-*/
+func GetAllUserAttachments(userID int64) ([]*AttachmentUsername, error) {
+	return getAllUserAttachments(x, userID)
+}

-func getAllUserAttachments(e Engine, userID int64) ([]*AttachmentUsername, error) {
+func getModelArtsUserAttachments(e Engine, userID int64) ([]*AttachmentUsername, error) {
 	attachments := make([]*AttachmentUsername, 0, 10)
 	if err := e.Table("attachment").Join("LEFT", "`user`", "attachment.uploader_id "+
-		"= `user`.id").Where("decompress_state= ? and (uploader_id= ? or is_private = ?)", DecompressStateDone, userID, false).Find(&attachments); err != nil {
+		"= `user`.id").Where("attachment.type = ? and (uploader_id= ? or is_private = ?)", TypeCloudBrainTwo, userID, false).Find(&attachments); err != nil {
 		return nil, err
 	}
 	return attachments, nil
 }

-func GetAllUserAttachments(userID int64) ([]*AttachmentUsername, error) {
-	return getAllUserAttachments(x, userID)
+func GetModelArtsUserAttachments(userID int64) ([]*AttachmentUsername, error) {
+	return getModelArtsUserAttachments(x, userID)
 }

models/cloudbrain.go (+200, -0)

@@ -14,6 +14,7 @@ import (

 type CloudbrainStatus string
 type JobType string
+type ModelArtsJobStatus string

 const (
 	JobWaiting CloudbrainStatus = "WAITING"
@@ -24,6 +25,22 @@ const (

 	JobTypeDebug     JobType = "DEBUG"
 	JobTypeBenchmark JobType = "BENCHMARK"
+
+	ModelArtsCreateQueue  ModelArtsJobStatus = "CREATE_QUEUING" // queued for creation (free resource)
+	ModelArtsCreating     ModelArtsJobStatus = "CREATING"       // creating
+	ModelArtsCreateFailed ModelArtsJobStatus = "CREATE_FAILED"  // creation failed
+	ModelArtsStartQueuing ModelArtsJobStatus = "START_QUEUING"  // queued for start (free resource)
+	ModelArtsReadyToStart ModelArtsJobStatus = "READY_TO_START" // waiting to start (free resource)
+	ModelArtsStarting     ModelArtsJobStatus = "STARTING"       // starting
+	ModelArtsRestarting   ModelArtsJobStatus = "RESTARTING"     // restarting
+	ModelArtsStartFailed  ModelArtsJobStatus = "START_FAILED"   // start failed
+	ModelArtsRunning      ModelArtsJobStatus = "RUNNING"        // running
+	ModelArtsStopping     ModelArtsJobStatus = "STOPPING"       // stopping
+	ModelArtsStopped      ModelArtsJobStatus = "STOPPED"        // stopped
+	ModelArtsUnavailable  ModelArtsJobStatus = "UNAVAILABLE"    // faulty
+	ModelArtsDeleted      ModelArtsJobStatus = "DELETED"        // deleted
+	ModelArtsResizing     ModelArtsJobStatus = "RESIZING"       // flavor change in progress
+	ModelArtsResizFailed  ModelArtsJobStatus = "RESIZE_FAILED"  // flavor change failed
 )

 type Cloudbrain struct {
@@ -41,6 +58,7 @@ type Cloudbrain struct {
 	UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
 	DeletedAt   time.Time          `xorm:"deleted"`
 	CanDebug    bool               `xorm:"-"`
+	Type        int                `xorm:"INDEX DEFAULT 0"`

 	User *User       `xorm:"-"`
 	Repo *Repository `xorm:"-"`
@@ -117,6 +135,7 @@ type CloudbrainsOptions struct {
 	SortType      string
 	CloudbrainIDs []int64
 	// JobStatus CloudbrainStatus
+	Type int
 }
 type TaskPod struct {
 	TaskRoleStatus struct {
@@ -263,6 +282,181 @@ type StopJobResult struct {
 	Msg string `json:"msg"`
 }


type CreateNotebookParams struct {
JobName string `json:"name"`
Description string `json:"description"`
ProfileID string `json:"profile_id"`
Flavor string `json:"flavor"`
Spec Spec `json:"spec"`
Workspace Workspace `json:"workspace"`
}

type Workspace struct {
ID string `json:"id"`
}

type Spec struct {
Storage Storage `json:"storage"`
AutoStop AutoStop `json:"auto_stop"`
}

type AutoStop struct {
Enable bool `json:"enable"`
Duration int `json:"duration"`
}

type Storage struct {
Type string `json:"type"`
Location Location `json:"location"`
}

type Location struct {
Path string `json:"path"`
}

type NotebookResult struct {
ErrorCode string `json:"error_code"`
ErrorMsg string `json:"error_msg"`
}

type CreateNotebookResult struct {
ErrorCode string `json:"error_code"`
ErrorMsg string `json:"error_msg"`
ID string `json:"id"`
Name string `json:"name"`
Description string `json:"description"`
Status string `json:"status"`
CreationTimestamp string `json:"creation_timestamp"`
LatestUpdateTimestamp string `json:"latest_update_timestamp"`
Profile struct {
ID string `json:"id"`
Name string `json:"name"`
Description string `json:"description"`
DeType string `json:"de_type"`
FlavorType string `json:"flavor_type"`
} `json:"profile"`
Flavor string `json:"flavor"`
FlavorDetails struct {
Name string `json:"name"`
Status string `json:"status"`
QueuingNum int `json:"queuing_num"`
QueueLeftTime int `json:"queue_left_time"` // seconds
Duration int `json:"duration"` // auto_stop duration, seconds
} `json:"flavor_details"`
}

type GetNotebookResult struct {
ErrorCode string `json:"error_code"`
ErrorMsg string `json:"error_msg"`
ID string `json:"id"`
Name string `json:"name"`
Description string `json:"description"`
Status string `json:"status"`
CreationTimestamp string `json:"creation_timestamp"`
CreateTime string
LatestUpdateTimestamp string `json:"latest_update_timestamp"`
LatestUpdateTime string
Profile struct {
ID string `json:"id"`
Name string `json:"name"`
Description string `json:"description"`
DeType string `json:"de_type"`
FlavorType string `json:"flavor_type"`
} `json:"profile"`
Flavor string `json:"flavor"`
FlavorDetails struct {
Name string `json:"name"`
Status string `json:"status"`
QueuingNum int `json:"queuing_num"`
QueueLeftTime int `json:"queue_left_time"` // seconds
Duration int `json:"duration"` // auto_stop duration, seconds
} `json:"flavor_details"`
QueuingInfo struct {
ID string `json:"id"`
Name string `json:"name"`
Flavor string `json:"flavor"`
DeType string `json:"de_type"`
Status string `json:"status"`
BeginTimestamp int `json:"begin_timestamp"` // time when the instance entered the queue
BeginTime string
RemainTime int `json:"remain_time"` // remaining queue time of the instance
EndTimestamp int `json:"end_timestamp"`
EndTime string
Rank int `json:"rank"` // rank of the instance in the queue
} `json:"queuing_info"`
Spec struct {
Annotations struct {
TargetDomain string `json:"target_domain"`
Url string `json:"url"`
} `json:"annotations"`
} `json:"spec"`
}

type GetTokenParams struct {
Auth Auth `json:"auth"`
}

type Auth struct {
Identity Identity `json:"identity"`
Scope Scope `json:"scope"`
}

type Scope struct {
Project Project `json:"project"`
}

type Project struct {
Name string `json:"name"`
}

type Identity struct {
Methods []string `json:"methods"`
Password Password `json:"password"`
}

type Password struct {
User NotebookUser `json:"user"`
}

type NotebookUser struct {
Name string `json:"name"`
Password string `json:"password"`
Domain Domain `json:"domain"`
}

type Domain struct {
Name string `json:"name"`
}

const (
ActionStart = "start"
ActionStop = "stop"
ActionRestart = "restart"
ActionQueue = "queue"
ActionDequeue = "dequeue"
)

type NotebookAction struct {
Action string `json:"action"`
}

type NotebookActionResult struct {
ErrorCode string `json:"error_code"`
ErrorMsg string `json:"error_msg"`
CurrentStatus string `json:"current_status"`
PreviousState string `json:"previous_state"`
}

type NotebookGetJobTokenResult struct {
ErrorCode string `json:"error_code"`
ErrorMsg string `json:"error_msg"`
Token string `json:"token"`
}

type NotebookDelResult struct {
InstanceID string `json:"instance_id"`
}

 func Cloudbrains(opts *CloudbrainsOptions) ([]*Cloudbrain, int64, error) {
 	sess := x.NewSession()
 	defer sess.Close()
@@ -286,6 +480,12 @@ func Cloudbrains(opts *CloudbrainsOptions) ([]*Cloudbrain, int64, error) {
 		)
 	}

+	if (opts.Type) >= 0 {
+		cond = cond.And(
+			builder.Eq{"cloudbrain.type": opts.Type},
+		)
+	}
+
 	// switch opts.JobStatus {
 	// case JobWaiting:
 	//	cond.And(builder.Eq{"cloudbrain.status": int(JobWaiting)})
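The notebook structs above map one-to-one onto the ModelArts REST payloads through their json tags. As orientation, here is a minimal sketch (not part of this commit) of the request body a CreateNotebookParams marshals to, reusing the profile and flavor constants introduced in modules/modelarts/modelarts.go below; all concrete values are illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"code.gitea.io/gitea/models"
)

func main() {
	// Placeholder values; the real ones come from settings and the dataset attachment.
	params := models.CreateNotebookParams{
		JobName:   "demo-notebook",
		ProfileID: "Python3-ascend910-arm",
		Flavor:    "modelarts.kat1.xlarge",
		Spec: models.Spec{
			Storage: models.Storage{
				Type:     "obs",
				Location: models.Location{Path: "testopendata/attachment/a/b/uuid/"},
			},
			AutoStop: models.AutoStop{Enable: true, Duration: 4 * 60 * 60},
		},
	}
	out, _ := json.MarshalIndent(params, "", "  ")
	fmt.Println(string(out))
	// Produces (abridged): {"name":"demo-notebook","description":"",
	// "profile_id":"Python3-ascend910-arm","flavor":"modelarts.kat1.xlarge",
	// "spec":{"storage":{...},"auto_stop":{"enable":true,"duration":14400}},
	// "workspace":{"id":""}}
}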


models/dataset.go (+4, -3)

@@ -196,11 +196,11 @@ func (s datasetMetaSearch) Less(i, j int) bool {
 	return s.ID[i] < s.ID[j]
 }

-func GetDatasetAttachments(rels ...*Dataset) (err error) {
-	return getDatasetAttachments(x, rels...)
+func GetDatasetAttachments(typeCloudBrain int, rels ...*Dataset) (err error) {
+	return getDatasetAttachments(x, typeCloudBrain, rels...)
 }

-func getDatasetAttachments(e Engine, rels ...*Dataset) (err error) {
+func getDatasetAttachments(e Engine, typeCloudBrain int, rels ...*Dataset) (err error) {
 	if len(rels) == 0 {
 		return
 	}
@@ -223,6 +223,7 @@ func getDatasetAttachments(e Engine, rels ...*Dataset) (err error) {
 	err = e.
 		Asc("dataset_id").
 		In("dataset_id", sortedRels.ID).
+		And("type = ?", typeCloudBrain).
 		Find(&attachments, Attachment{})
 	if err != nil {
 		return err


models/file_chunk.go (+12, -6)

@@ -10,6 +10,11 @@ const (
 	FileUploaded
 )

+const (
+	TypeCloudBrainOne = 0
+	TypeCloudBrainTwo = 1
+)
+
 type FileChunk struct {
 	ID   int64  `xorm:"pk autoincr"`
 	UUID string `xorm:"uuid UNIQUE"`
@@ -19,7 +24,8 @@ type FileChunk struct {
 	TotalChunks int
 	Size        int64
 	UserID      int64 `xorm:"INDEX"`
-	CompletedParts []string `xorm:"DEFAULT """` // chunkNumber+etag eg: ,1-asqwewqe21312312.2-123hjkas
+	Type           int      `xorm:"INDEX DEFAULT 0"`
+	CompletedParts []string `xorm:"DEFAULT ''"` // chunkNumber+etag eg: ,1-asqwewqe21312312.2-123hjkas
 	CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
 	UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
 }
@@ -41,14 +47,14 @@ func getFileChunkByMD5(e Engine, md5 string) (*FileChunk, error) {
 }

 // GetFileChunkByMD5 returns fileChunk by given id
-func GetFileChunkByMD5AndUser(md5 string, userID int64) (*FileChunk, error) {
-	return getFileChunkByMD5AndUser(x, md5, userID)
+func GetFileChunkByMD5AndUser(md5 string, userID int64, typeCloudBrain int) (*FileChunk, error) {
+	return getFileChunkByMD5AndUser(x, md5, userID, typeCloudBrain)
 }

-func getFileChunkByMD5AndUser(e Engine, md5 string, userID int64) (*FileChunk, error) {
+func getFileChunkByMD5AndUser(e Engine, md5 string, userID int64, typeCloudBrain int) (*FileChunk, error) {
 	fileChunk := new(FileChunk)

-	if has, err := e.Where("md5 = ? and user_id = ?", md5, userID).Get(fileChunk); err != nil {
+	if has, err := e.Where("md5 = ? and user_id = ? and type = ?", md5, userID, typeCloudBrain).Get(fileChunk); err != nil {
 		return nil, err
 	} else if !has {
 		return nil, ErrFileChunkNotExist{md5, ""}
@@ -89,6 +95,6 @@ func UpdateFileChunk(fileChunk *FileChunk) error {
 func updateFileChunk(e Engine, fileChunk *FileChunk) error {
 	var sess *xorm.Session
 	sess = e.Where("uuid = ?", fileChunk.UUID)
-	_, err := sess.Cols("is_uploaded", "completed_parts").Update(fileChunk)
+	_, err := sess.Cols("is_uploaded").Update(fileChunk)
 	return err
 }

models/user.go (+2, -2)

@@ -2045,8 +2045,8 @@ func SyncExternalUsers(ctx context.Context, updateExisting bool) error {

 func GetBlockChainUnSuccessUsers() ([]*User, error) {
 	users := make([]*User, 0, 10)
-	err := x.Where("public_key is null").
-		Or("private_key is null").
+	err := x.Where("public_key = ''").
+		Or("private_key = ''").
 		Find(&users)
 	return users, err
 }

modules/APIGW-go-sdk-2.0.2/core/escape.go (+42, -0)

@@ -0,0 +1,42 @@
// based on https://github.com/golang/go/blob/master/src/net/url/url.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package core

func shouldEscape(c byte) bool {
if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '_' || c == '-' || c == '~' || c == '.' {
return false
}
return true
}
func escape(s string) string {
hexCount := 0
for i := 0; i < len(s); i++ {
c := s[i]
if shouldEscape(c) {
hexCount++
}
}

if hexCount == 0 {
return s
}

t := make([]byte, len(s)+2*hexCount)
j := 0
for i := 0; i < len(s); i++ {
switch c := s[i]; {
case shouldEscape(c):
t[j] = '%'
t[j+1] = "0123456789ABCDEF"[c>>4]
t[j+2] = "0123456789ABCDEF"[c&15]
j += 3
default:
t[j] = s[i]
j++
}
}
return string(t)
}
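A test-style sketch (not part of this commit) of the escaping rule above: only A-Z, a-z, 0-9, '_', '-', '~', and '.' pass through unchanged; everything else is percent-encoded. It would have to live in package core, since escape is unexported.

package core

import "testing"

func TestEscapeSketch(t *testing.T) {
	// space (0x20) and '/' are outside the unreserved set, so both are encoded
	if got := escape("a b/c"); got != "a%20b%2Fc" {
		t.Fatalf("escape(%q) = %q", "a b/c", got)
	}
	// unreserved characters are returned as-is (the no-allocation fast path)
	if got := escape("A-z_0.9~"); got != "A-z_0.9~" {
		t.Fatalf("escape returned %q", got)
	}
}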

modules/APIGW-go-sdk-2.0.2/core/signer.go (+208, -0)

@@ -0,0 +1,208 @@
// HWS API Gateway Signature
// based on https://github.com/datastream/aws/blob/master/signv4.go
// Copyright (c) 2014, Xianjie

package core

import (
"bytes"
"crypto/hmac"
"crypto/sha256"
"fmt"
"io/ioutil"
"net/http"
"sort"
"strings"
"time"
)

const (
BasicDateFormat = "20060102T150405Z"
Algorithm = "SDK-HMAC-SHA256"
HeaderXDate = "X-Sdk-Date"
HeaderHost = "host"
HeaderAuthorization = "Authorization"
HeaderContentSha256 = "X-Sdk-Content-Sha256"
)

func hmacsha256(key []byte, data string) ([]byte, error) {
h := hmac.New(sha256.New, []byte(key))
if _, err := h.Write([]byte(data)); err != nil {
return nil, err
}
return h.Sum(nil), nil
}

// Build a CanonicalRequest from a regular request string
//
// CanonicalRequest =
// HTTPRequestMethod + '\n' +
// CanonicalURI + '\n' +
// CanonicalQueryString + '\n' +
// CanonicalHeaders + '\n' +
// SignedHeaders + '\n' +
// HexEncode(Hash(RequestPayload))
func CanonicalRequest(r *http.Request, signedHeaders []string) (string, error) {
var hexencode string
var err error
if hex := r.Header.Get(HeaderContentSha256); hex != "" {
hexencode = hex
} else {
data, err := RequestPayload(r)
if err != nil {
return "", err
}
hexencode, err = HexEncodeSHA256Hash(data)
if err != nil {
return "", err
}
}
return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", r.Method, CanonicalURI(r), CanonicalQueryString(r), CanonicalHeaders(r, signedHeaders), strings.Join(signedHeaders, ";"), hexencode), err
}

// CanonicalURI returns request uri
func CanonicalURI(r *http.Request) string {
pattens := strings.Split(r.URL.Path, "/")
var uri []string
for _, v := range pattens {
uri = append(uri, escape(v))
}
urlpath := strings.Join(uri, "/")
if len(urlpath) == 0 || urlpath[len(urlpath)-1] != '/' {
urlpath = urlpath + "/"
}
return urlpath
}

// CanonicalQueryString
func CanonicalQueryString(r *http.Request) string {
var keys []string
query := r.URL.Query()
for key := range query {
keys = append(keys, key)
}
sort.Strings(keys)
var a []string
for _, key := range keys {
k := escape(key)
sort.Strings(query[key])
for _, v := range query[key] {
kv := fmt.Sprintf("%s=%s", k, escape(v))
a = append(a, kv)
}
}
queryStr := strings.Join(a, "&")
r.URL.RawQuery = queryStr
return queryStr
}

// CanonicalHeaders
func CanonicalHeaders(r *http.Request, signerHeaders []string) string {
var a []string
header := make(map[string][]string)
for k, v := range r.Header {
header[strings.ToLower(k)] = v
}
for _, key := range signerHeaders {
value := header[key]
if strings.EqualFold(key, HeaderHost) {
value = []string{r.Host}
}
sort.Strings(value)
for _, v := range value {
a = append(a, key+":"+strings.TrimSpace(v))
}
}
return fmt.Sprintf("%s\n", strings.Join(a, "\n"))
}

// SignedHeaders
func SignedHeaders(r *http.Request) []string {
var a []string
for key := range r.Header {
a = append(a, strings.ToLower(key))
}
sort.Strings(a)
return a
}

// RequestPayload
func RequestPayload(r *http.Request) ([]byte, error) {
if r.Body == nil {
return []byte(""), nil
}
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return []byte(""), err
}
r.Body = ioutil.NopCloser(bytes.NewBuffer(b))
return b, err
}

// Create a "String to Sign".
func StringToSign(canonicalRequest string, t time.Time) (string, error) {
hash := sha256.New()
_, err := hash.Write([]byte(canonicalRequest))
if err != nil {
return "", err
}
return fmt.Sprintf("%s\n%s\n%x",
Algorithm, t.UTC().Format(BasicDateFormat), hash.Sum(nil)), nil
}

// Create the HWS Signature.
func SignStringToSign(stringToSign string, signingKey []byte) (string, error) {
hm, err := hmacsha256(signingKey, stringToSign)
return fmt.Sprintf("%x", hm), err
}

// HexEncodeSHA256Hash returns hexcode of sha256
func HexEncodeSHA256Hash(body []byte) (string, error) {
hash := sha256.New()
if body == nil {
body = []byte("")
}
_, err := hash.Write(body)
return fmt.Sprintf("%x", hash.Sum(nil)), err
}

// Get the finalized value for the "Authorization" header. The signature parameter is the output from SignStringToSign
func AuthHeaderValue(signature, accessKey string, signedHeaders []string) string {
return fmt.Sprintf("%s Access=%s, SignedHeaders=%s, Signature=%s", Algorithm, accessKey, strings.Join(signedHeaders, ";"), signature)
}

// Signature HWS meta
type Signer struct {
Key string
Secret string
}

// SignRequest set Authorization header
func (s *Signer) Sign(r *http.Request) error {
var t time.Time
var err error
var dt string
if dt = r.Header.Get(HeaderXDate); dt != "" {
t, err = time.Parse(BasicDateFormat, dt)
}
if err != nil || dt == "" {
t = time.Now()
r.Header.Set(HeaderXDate, t.UTC().Format(BasicDateFormat))
}
signedHeaders := SignedHeaders(r)
canonicalRequest, err := CanonicalRequest(r, signedHeaders)
if err != nil {
return err
}
stringToSign, err := StringToSign(canonicalRequest, t)
if err != nil {
return err
}
signature, err := SignStringToSign(stringToSign, []byte(s.Secret))
if err != nil {
return err
}
authValue := AuthHeaderValue(signature, s.Key, signedHeaders)
r.Header.Set(HeaderAuthorization, authValue)
return nil
}
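A minimal usage sketch (not part of this commit) of the Signer: build the request, call Sign, and the X-Sdk-Date and Authorization headers are filled in. The import path assumes the in-tree location of this SDK copy; the keys and URL are placeholders.

package main

import (
	"fmt"
	"net/http"
	"strings"

	core "code.gitea.io/gitea/modules/APIGW-go-sdk-2.0.2/core"
)

func main() {
	signer := core.Signer{Key: "access-key-placeholder", Secret: "secret-key-placeholder"}
	req, _ := http.NewRequest(http.MethodPost,
		"https://modelarts.example.com/v1/project-id/demanager/instances",
		strings.NewReader(`{"name":"demo"}`))
	req.Header.Set("Content-Type", "application/json")
	if err := signer.Sign(req); err != nil { // sets X-Sdk-Date and Authorization
		panic(err)
	}
	fmt.Println(req.Header.Get(core.HeaderAuthorization))
	// SDK-HMAC-SHA256 Access=..., SignedHeaders=..., Signature=...
}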

modules/auth/cloudbrain.go (+0, -1)

@@ -5,7 +5,6 @@ import (
 	"gitea.com/macaron/macaron"
 )

-// CreateDatasetForm form for dataset page
 type CreateCloudBrainForm struct {
 	JobName string `form:"job_name" binding:"Required"`
 	Image   string `form:"image" binding:"Required"`


modules/auth/modelarts.go (+16, -0)

@@ -0,0 +1,16 @@
package auth

import (
"gitea.com/macaron/binding"
"gitea.com/macaron/macaron"
)

type CreateModelArtsForm struct {
JobName string `form:"job_name" binding:"Required"`
Attachment string `form:"attachment" binding:"Required"`
Description string `form:"description"`
}

func (f *CreateModelArtsForm) Validate(ctx *macaron.Context, errs binding.Errors) binding.Errors {
return validate(errs, ctx.Data, f, ctx.Locale)
}

modules/cloudbrain/cloudbrain.go (+1, -0)

@@ -98,6 +98,7 @@ func GenerateTask(ctx *context.Context, jobName, image, command, uuid, codePath,
 		JobName:     jobName,
 		SubTaskName: SubTaskName,
 		JobType:     jobType,
+		Type:        models.TypeCloudBrainOne,
 	})

 	if err != nil {


modules/modelarts/modelarts.go (+65, -0)

@@ -0,0 +1,65 @@
package modelarts

import (
"code.gitea.io/gitea/modules/setting"
"path"

"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/log"
)

const (
storageTypeOBS = "obs"
autoStopDuration = 4 * 60 * 60
flavor = "modelarts.kat1.xlarge"
profileID = "Python3-ascend910-arm"

DataSetMountPath = "/home/ma-user/work"
NotebookEnv = "Python3"
NotebookType = "Ascend"
FlavorInfo = "Ascend: 1*Ascend 910 CPU: 24 核 96GiB (modelarts.kat1.xlarge)" // 核 = "cores"
)

func GenerateTask(ctx *context.Context, jobName, uuid, description string) error {
dataActualPath := setting.Bucket + "/" + setting.BasePath + path.Join(uuid[0:1], uuid[1:2]) + "/" + uuid + "/"
jobResult, err := CreateJob(models.CreateNotebookParams{
JobName: jobName,
Description: description,
ProfileID: profileID,
Flavor: flavor,
Spec: models.Spec{
Storage: models.Storage{
Type: storageTypeOBS,
Location: models.Location{
Path: dataActualPath,
},
},
AutoStop: models.AutoStop{
Enable: true,
Duration: autoStopDuration,
},
},
})
if err != nil {
log.Error("CreateJob failed: %v", err.Error())
return err
}

err = models.CreateCloudbrain(&models.Cloudbrain{
Status: string(models.JobWaiting),
UserID: ctx.User.ID,
RepoID: ctx.Repo.Repository.ID,
JobID: jobResult.ID,
JobName: jobName,
JobType: string(models.JobTypeDebug),
Type: models.TypeCloudBrainTwo,
})

if err != nil {
return err
}

return nil
}
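GenerateTask derives the OBS location from the first two characters of the attachment UUID. A tiny sketch (not part of this commit) of the path it builds, reusing the sample [obs] values from app.ini.sample above; the UUID is a placeholder.

package main

import (
	"fmt"
	"path"
)

func main() {
	bucket, basePath := "testopendata", "attachment/" // sample [obs] settings
	uuid := "ab12cd34ef56"                            // placeholder attachment UUID
	dataActualPath := bucket + "/" + basePath + path.Join(uuid[0:1], uuid[1:2]) + "/" + uuid + "/"
	fmt.Println(dataActualPath) // testopendata/attachment/a/b/ab12cd34ef56/
}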

modules/modelarts/resty.go (+288, -0)

@@ -0,0 +1,288 @@
package modelarts

import (
"code.gitea.io/gitea/modules/log"
"crypto/tls"
"encoding/json"
"fmt"
"net/http"

"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/setting"
"github.com/go-resty/resty/v2"
)

var (
restyClient *resty.Client
HOST string
TOKEN string
)

const (
methodPassword = "password"

urlGetToken = "/v3/auth/tokens"
urlNotebook = "/demanager/instances"
errorCodeExceedLimit = "ModelArts.0118"
)
func getRestyClient() *resty.Client {
if restyClient == nil {
restyClient = resty.New()
restyClient.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: true})
}
return restyClient
}

func checkSetting() {
if len(HOST) != 0 && len(TOKEN) != 0 && restyClient != nil {
return
}

err := getToken()
if err != nil {
log.Error("getToken failed:%v", err)
}
}

func getToken() error {
HOST = setting.ModelArtsHost

client := getRestyClient()
params := models.GetTokenParams{
Auth: models.Auth{
Identity: models.Identity{
Methods: []string{methodPassword},
Password: models.Password{
User: models.NotebookUser{
Name: setting.ModelArtsUsername,
Password: setting.ModelArtsPassword,
Domain: models.Domain{
Name: setting.ModelArtsDomain,
},
},
},
},
Scope: models.Scope{
Project: models.Project{
Name: setting.ProjectName,
},
},
},
}

res, err := client.R().
SetHeader("Content-Type", "application/json").
SetBody(params).
Post(setting.IamHost + urlGetToken)
if err != nil {
return fmt.Errorf("resty getToken: %v", err)
}

if res.StatusCode() != http.StatusCreated {
return fmt.Errorf("getToken failed:%s", res.String())
}

TOKEN = res.Header().Get("X-Subject-Token")

return nil
}

func CreateJob(createJobParams models.CreateNotebookParams) (*models.CreateNotebookResult, error) {
checkSetting()
client := getRestyClient()
var result models.CreateNotebookResult

retry := 0

sendjob:
res, err := client.R().
SetHeader("Content-Type", "application/json").
SetAuthToken(TOKEN).
SetBody(createJobParams).
SetResult(&result).
Post(HOST + "/v1/" + setting.ProjectID + urlNotebook)

if err != nil {
return nil, fmt.Errorf("resty create job: %s", err)
}

if res.StatusCode() == http.StatusUnauthorized && retry < 1 {
retry++
_ = getToken()
goto sendjob
}

var response models.NotebookResult
err = json.Unmarshal(res.Body(), &response)
if err != nil {
log.Error("json.Unmarshal failed: %s", err.Error())
return &result, fmt.Errorf("son.Unmarshal failed: %s", err.Error())
}

if len(response.ErrorCode) != 0 {
log.Error("CreateJob failed(%s): %s", response.ErrorCode, response.ErrorMsg)
if response.ErrorCode == errorCodeExceedLimit {
response.ErrorMsg = "所选规格使用数量已超过最大配额限制。" // "usage of the selected flavor already exceeds the maximum quota limit"
}
return &result, fmt.Errorf("CreateJob failed(%s): %s", response.ErrorCode, response.ErrorMsg)
}

return &result, nil
}

func GetJob(jobID string) (*models.GetNotebookResult, error) {
checkSetting()
client := getRestyClient()
var result models.GetNotebookResult

retry := 0

sendjob:
res, err := client.R().
SetHeader("Content-Type", "application/json").
SetAuthToken(TOKEN).
SetResult(&result).
Get(HOST + "/v1/" + setting.ProjectID + urlNotebook + "/" + jobID)

if err != nil {
return nil, fmt.Errorf("resty GetJob: %v", err)
}

if res.StatusCode() == http.StatusUnauthorized && retry < 1 {
retry++
_ = getToken()
goto sendjob
}

var response models.NotebookResult
err = json.Unmarshal(res.Body(), &response)
if err != nil {
log.Error("json.Unmarshal failed: %s", err.Error())
return &result, fmt.Errorf("son.Unmarshal failed: %s", err.Error())
}

if len(response.ErrorCode) != 0 {
log.Error("GetJob failed(%s): %s", response.ErrorCode, response.ErrorMsg)
return &result, fmt.Errorf("GetJob failed(%s): %s", response.ErrorCode, response.ErrorMsg)
}

return &result, nil
}

func StopJob(jobID string, param models.NotebookAction) (*models.NotebookActionResult, error) {
checkSetting()
client := getRestyClient()
var result models.NotebookActionResult

retry := 0

sendjob:
res, err := client.R().
SetHeader("Content-Type", "application/json").
SetBody(param).
SetAuthToken(TOKEN).
SetResult(&result).
Post(HOST + "/v1/" + setting.ProjectID + urlNotebook + "/" + jobID + "/action")

if err != nil {
return &result, fmt.Errorf("resty StopJob: %v", err)
}

if res.StatusCode() == http.StatusUnauthorized && retry < 1 {
retry++
_ = getToken()
goto sendjob
}

var response models.NotebookResult
err = json.Unmarshal(res.Body(), &response)
if err != nil {
log.Error("json.Unmarshal failed: %s", err.Error())
return &result, fmt.Errorf("son.Unmarshal failed: %s", err.Error())
}

if len(response.ErrorCode) != 0 {
log.Error("StopJob failed(%s): %s", response.ErrorCode, response.ErrorMsg)
return &result, fmt.Errorf("StopJob failed(%s): %s", response.ErrorCode, response.ErrorMsg)
}

return &result, nil
}

func DelJob(jobID string) (*models.NotebookDelResult, error) {
checkSetting()
client := getRestyClient()
var result models.NotebookDelResult

retry := 0

sendjob:
res, err := client.R().
SetHeader("Content-Type", "application/json").
SetAuthToken(TOKEN).
SetResult(&result).
Delete(HOST + "/v1/" + setting.ProjectID + urlNotebook + "/" + jobID)

if err != nil {
return &result, fmt.Errorf("resty DelJob: %v", err)
}

if res.StatusCode() == http.StatusUnauthorized && retry < 1 {
retry++
_ = getToken()
goto sendjob
}

var response models.NotebookResult
err = json.Unmarshal(res.Body(), &response)
if err != nil {
log.Error("json.Unmarshal failed: %s", err.Error())
return &result, fmt.Errorf("son.Unmarshal failed: %s", err.Error())
}

if len(response.ErrorCode) != 0 {
log.Error("DelJob failed(%s): %s", response.ErrorCode, response.ErrorMsg)
return &result, fmt.Errorf("DelJob failed(%s): %s", response.ErrorCode, response.ErrorMsg)
}

return &result, nil
}

func GetJobToken(jobID string) (*models.NotebookGetJobTokenResult, error) {
checkSetting()
client := getRestyClient()
var result models.NotebookGetJobTokenResult

retry := 0

sendjob:
res, err := client.R().
SetHeader("Content-Type", "application/json").
SetAuthToken(TOKEN).
SetResult(&result).
Get(HOST + "/v1/" + setting.ProjectID + urlNotebook + "/" + jobID + "/token")

if err != nil {
return &result, fmt.Errorf("resty GetJobToken: %v", err)
}

if res.StatusCode() == http.StatusUnauthorized && retry < 1 {
retry++
_ = getToken()
goto sendjob
}

var response models.NotebookResult
err = json.Unmarshal(res.Body(), &response)
if err != nil {
log.Error("json.Unmarshal failed: %s", err.Error())
return &result, fmt.Errorf("son.Unmarshal failed: %s", err.Error())
}

if len(response.ErrorCode) != 0 {
log.Error("GetJobToken failed(%s): %s", response.ErrorCode, response.ErrorMsg)
return &result, fmt.Errorf("GetJobToken failed(%s): %s", response.ErrorCode, response.ErrorMsg)
}

return &result, nil
}
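A usage sketch (not part of this commit) of the helpers above, stopping and then deleting a notebook instance; the job ID is a placeholder. Each helper already refreshes TOKEN once on a 401 and surfaces ModelArts error codes as Go errors.

package main

import (
	"fmt"

	"code.gitea.io/gitea/models"
	"code.gitea.io/gitea/modules/modelarts"
)

func main() {
	jobID := "job-id-placeholder"
	// Ask ModelArts to stop the instance first.
	if _, err := modelarts.StopJob(jobID, models.NotebookAction{Action: models.ActionStop}); err != nil {
		fmt.Println("stop failed:", err)
		return
	}
	// Then remove it entirely.
	res, err := modelarts.DelJob(jobID)
	if err != nil {
		fmt.Println("delete failed:", err)
		return
	}
	fmt.Println("deleted instance:", res.InstanceID)
}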

modules/obs/auth.go (+466, -0)

@@ -0,0 +1,466 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"fmt"
"net/url"
"sort"
"strings"
"time"
)

func (obsClient ObsClient) doAuthTemporary(method, bucketName, objectKey string, params map[string]string,
headers map[string][]string, expires int64) (requestURL string, err error) {
isAkSkEmpty := obsClient.conf.securityProvider == nil || obsClient.conf.securityProvider.ak == "" || obsClient.conf.securityProvider.sk == ""
if isAkSkEmpty == false && obsClient.conf.securityProvider.securityToken != "" {
if obsClient.conf.signature == SignatureObs {
params[HEADER_STS_TOKEN_OBS] = obsClient.conf.securityProvider.securityToken
} else {
params[HEADER_STS_TOKEN_AMZ] = obsClient.conf.securityProvider.securityToken
}
}
requestURL, canonicalizedURL := obsClient.conf.formatUrls(bucketName, objectKey, params, true)
parsedRequestURL, err := url.Parse(requestURL)
if err != nil {
return "", err
}
encodeHeaders(headers)
hostName := parsedRequestURL.Host

isV4 := obsClient.conf.signature == SignatureV4
prepareHostAndDate(headers, hostName, isV4)

if isAkSkEmpty {
doLog(LEVEL_WARN, "No ak/sk provided, skip to construct authorization")
} else {
if isV4 {
date, parseDateErr := time.Parse(RFC1123_FORMAT, headers[HEADER_DATE_CAMEL][0])
if parseDateErr != nil {
doLog(LEVEL_WARN, "Failed to parse date with reason: %v", parseDateErr)
return "", parseDateErr
}
delete(headers, HEADER_DATE_CAMEL)
shortDate := date.Format(SHORT_DATE_FORMAT)
longDate := date.Format(LONG_DATE_FORMAT)
if len(headers[HEADER_HOST_CAMEL]) != 0 {
index := strings.LastIndex(headers[HEADER_HOST_CAMEL][0], ":")
if index != -1 {
port := headers[HEADER_HOST_CAMEL][0][index+1:]
if port == "80" || port == "443" {
headers[HEADER_HOST_CAMEL] = []string{headers[HEADER_HOST_CAMEL][0][:index]}
}
}

}

signedHeaders, _headers := getSignedHeaders(headers)

credential, scope := getCredential(obsClient.conf.securityProvider.ak, obsClient.conf.region, shortDate)
params[PARAM_ALGORITHM_AMZ_CAMEL] = V4_HASH_PREFIX
params[PARAM_CREDENTIAL_AMZ_CAMEL] = credential
params[PARAM_DATE_AMZ_CAMEL] = longDate
params[PARAM_EXPIRES_AMZ_CAMEL] = Int64ToString(expires)
params[PARAM_SIGNEDHEADERS_AMZ_CAMEL] = strings.Join(signedHeaders, ";")

requestURL, canonicalizedURL = obsClient.conf.formatUrls(bucketName, objectKey, params, true)
parsedRequestURL, _err := url.Parse(requestURL)
if _err != nil {
return "", _err
}

stringToSign := getV4StringToSign(method, canonicalizedURL, parsedRequestURL.RawQuery, scope, longDate, UNSIGNED_PAYLOAD, signedHeaders, _headers)
signature := getSignature(stringToSign, obsClient.conf.securityProvider.sk, obsClient.conf.region, shortDate)

requestURL += fmt.Sprintf("&%s=%s", PARAM_SIGNATURE_AMZ_CAMEL, UrlEncode(signature, false))

} else {
originDate := headers[HEADER_DATE_CAMEL][0]
date, parseDateErr := time.Parse(RFC1123_FORMAT, originDate)
if parseDateErr != nil {
doLog(LEVEL_WARN, "Failed to parse date with reason: %v", parseDateErr)
return "", parseDateErr
}
expires += date.Unix()
headers[HEADER_DATE_CAMEL] = []string{Int64ToString(expires)}

stringToSign := getV2StringToSign(method, canonicalizedURL, headers, obsClient.conf.signature == SignatureObs)
signature := UrlEncode(Base64Encode(HmacSha1([]byte(obsClient.conf.securityProvider.sk), []byte(stringToSign))), false)
if strings.Index(requestURL, "?") < 0 {
requestURL += "?"
} else {
requestURL += "&"
}
delete(headers, HEADER_DATE_CAMEL)

if obsClient.conf.signature != SignatureObs {
requestURL += "AWS"
}
requestURL += fmt.Sprintf("AccessKeyId=%s&Expires=%d&Signature=%s", UrlEncode(obsClient.conf.securityProvider.ak, false), expires, signature)
}
}

return
}

func (obsClient ObsClient) doAuth(method, bucketName, objectKey string, params map[string]string,
headers map[string][]string, hostName string) (requestURL string, err error) {
isAkSkEmpty := obsClient.conf.securityProvider == nil || obsClient.conf.securityProvider.ak == "" || obsClient.conf.securityProvider.sk == ""
if isAkSkEmpty == false && obsClient.conf.securityProvider.securityToken != "" {
if obsClient.conf.signature == SignatureObs {
headers[HEADER_STS_TOKEN_OBS] = []string{obsClient.conf.securityProvider.securityToken}
} else {
headers[HEADER_STS_TOKEN_AMZ] = []string{obsClient.conf.securityProvider.securityToken}
}
}
isObs := obsClient.conf.signature == SignatureObs
requestURL, canonicalizedURL := obsClient.conf.formatUrls(bucketName, objectKey, params, true)
parsedRequestURL, err := url.Parse(requestURL)
if err != nil {
return "", err
}
encodeHeaders(headers)

if hostName == "" {
hostName = parsedRequestURL.Host
}

isV4 := obsClient.conf.signature == SignatureV4
prepareHostAndDate(headers, hostName, isV4)

if isAkSkEmpty {
doLog(LEVEL_WARN, "No ak/sk provided, skip to construct authorization")
} else {
ak := obsClient.conf.securityProvider.ak
sk := obsClient.conf.securityProvider.sk
var authorization string
if isV4 {
headers[HEADER_CONTENT_SHA256_AMZ] = []string{UNSIGNED_PAYLOAD}
ret := v4Auth(ak, sk, obsClient.conf.region, method, canonicalizedURL, parsedRequestURL.RawQuery, headers)
authorization = fmt.Sprintf("%s Credential=%s,SignedHeaders=%s,Signature=%s", V4_HASH_PREFIX, ret["Credential"], ret["SignedHeaders"], ret["Signature"])
} else {
ret := v2Auth(ak, sk, method, canonicalizedURL, headers, isObs)
hashPrefix := V2_HASH_PREFIX
if isObs {
hashPrefix = OBS_HASH_PREFIX
}
authorization = fmt.Sprintf("%s %s:%s", hashPrefix, ak, ret["Signature"])
}
headers[HEADER_AUTH_CAMEL] = []string{authorization}
}
return
}

func prepareHostAndDate(headers map[string][]string, hostName string, isV4 bool) {
headers[HEADER_HOST_CAMEL] = []string{hostName}
if date, ok := headers[HEADER_DATE_AMZ]; ok {
flag := false
if len(date) == 1 {
if isV4 {
if t, err := time.Parse(LONG_DATE_FORMAT, date[0]); err == nil {
headers[HEADER_DATE_CAMEL] = []string{FormatUtcToRfc1123(t)}
flag = true
}
} else {
if strings.HasSuffix(date[0], "GMT") {
headers[HEADER_DATE_CAMEL] = []string{date[0]}
flag = true
}
}
}
if !flag {
delete(headers, HEADER_DATE_AMZ)
}
}
if _, ok := headers[HEADER_DATE_CAMEL]; !ok {
headers[HEADER_DATE_CAMEL] = []string{FormatUtcToRfc1123(time.Now().UTC())}
}
}

func encodeHeaders(headers map[string][]string) {
for key, values := range headers {
for index, value := range values {
values[index] = UrlEncode(value, true)
}
headers[key] = values
}
}

func attachHeaders(headers map[string][]string, isObs bool) string {
length := len(headers)
_headers := make(map[string][]string, length)
keys := make([]string, 0, length)

for key, value := range headers {
_key := strings.ToLower(strings.TrimSpace(key))
if _key != "" {
prefixheader := HEADER_PREFIX
if isObs {
prefixheader = HEADER_PREFIX_OBS
}
if _key == "content-md5" || _key == "content-type" || _key == "date" || strings.HasPrefix(_key, prefixheader) {
keys = append(keys, _key)
_headers[_key] = value
}
} else {
delete(headers, key)
}
}

for _, interestedHeader := range interestedHeaders {
if _, ok := _headers[interestedHeader]; !ok {
_headers[interestedHeader] = []string{""}
keys = append(keys, interestedHeader)
}
}
dateCamelHeader := PARAM_DATE_AMZ_CAMEL
dataHeader := HEADER_DATE_AMZ
if isObs {
dateCamelHeader = PARAM_DATE_OBS_CAMEL
dataHeader = HEADER_DATE_OBS
}
if _, ok := _headers[HEADER_DATE_CAMEL]; ok {
if _, ok := _headers[dataHeader]; ok {
_headers[HEADER_DATE_CAMEL] = []string{""}
} else if _, ok := headers[dateCamelHeader]; ok {
_headers[HEADER_DATE_CAMEL] = []string{""}
}
} else if _, ok := _headers[strings.ToLower(HEADER_DATE_CAMEL)]; ok {
if _, ok := _headers[dataHeader]; ok {
_headers[HEADER_DATE_CAMEL] = []string{""}
} else if _, ok := headers[dateCamelHeader]; ok {
_headers[HEADER_DATE_CAMEL] = []string{""}
}
}

sort.Strings(keys)

stringToSign := make([]string, 0, len(keys))
for _, key := range keys {
var value string
prefixHeader := HEADER_PREFIX
prefixMetaHeader := HEADER_PREFIX_META
if isObs {
prefixHeader = HEADER_PREFIX_OBS
prefixMetaHeader = HEADER_PREFIX_META_OBS
}
if strings.HasPrefix(key, prefixHeader) {
if strings.HasPrefix(key, prefixMetaHeader) {
for index, v := range _headers[key] {
value += strings.TrimSpace(v)
if index != len(_headers[key])-1 {
value += ","
}
}
} else {
value = strings.Join(_headers[key], ",")
}
value = fmt.Sprintf("%s:%s", key, value)
} else {
value = strings.Join(_headers[key], ",")
}
stringToSign = append(stringToSign, value)
}
return strings.Join(stringToSign, "\n")
}

func getV2StringToSign(method, canonicalizedURL string, headers map[string][]string, isObs bool) string {
stringToSign := strings.Join([]string{method, "\n", attachHeaders(headers, isObs), "\n", canonicalizedURL}, "")

var isSecurityToken bool
var securityToken []string
if isObs {
securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_OBS]
} else {
securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_AMZ]
}
var query []string
if !isSecurityToken {
parmas := strings.Split(canonicalizedURL, "?")
if len(parmas) > 1 {
query = strings.Split(parmas[1], "&")
for _, value := range query {
if strings.HasPrefix(value, HEADER_STS_TOKEN_AMZ+"=") || strings.HasPrefix(value, HEADER_STS_TOKEN_OBS+"=") {
if value[len(HEADER_STS_TOKEN_AMZ)+1:] != "" {
securityToken = []string{value[len(HEADER_STS_TOKEN_AMZ)+1:]}
isSecurityToken = true
}
}
}
}
}
logStringToSign := stringToSign
if isSecurityToken && len(securityToken) > 0 {
logStringToSign = strings.Replace(logStringToSign, securityToken[0], "******", -1)
}
doLog(LEVEL_DEBUG, "The v2 auth stringToSign:\n%s", logStringToSign)
return stringToSign
}

func v2Auth(ak, sk, method, canonicalizedURL string, headers map[string][]string, isObs bool) map[string]string {
stringToSign := getV2StringToSign(method, canonicalizedURL, headers, isObs)
return map[string]string{"Signature": Base64Encode(HmacSha1([]byte(sk), []byte(stringToSign)))}
}

func getScope(region, shortDate string) string {
return fmt.Sprintf("%s/%s/%s/%s", shortDate, region, V4_SERVICE_NAME, V4_SERVICE_SUFFIX)
}

func getCredential(ak, region, shortDate string) (string, string) {
scope := getScope(region, shortDate)
return fmt.Sprintf("%s/%s", ak, scope), scope
}

func getV4StringToSign(method, canonicalizedURL, queryURL, scope, longDate, payload string, signedHeaders []string, headers map[string][]string) string {
canonicalRequest := make([]string, 0, 10+len(signedHeaders)*4)
canonicalRequest = append(canonicalRequest, method)
canonicalRequest = append(canonicalRequest, "\n")
canonicalRequest = append(canonicalRequest, canonicalizedURL)
canonicalRequest = append(canonicalRequest, "\n")
canonicalRequest = append(canonicalRequest, queryURL)
canonicalRequest = append(canonicalRequest, "\n")

for _, signedHeader := range signedHeaders {
values, _ := headers[signedHeader]
for _, value := range values {
canonicalRequest = append(canonicalRequest, signedHeader)
canonicalRequest = append(canonicalRequest, ":")
canonicalRequest = append(canonicalRequest, value)
canonicalRequest = append(canonicalRequest, "\n")
}
}
canonicalRequest = append(canonicalRequest, "\n")
canonicalRequest = append(canonicalRequest, strings.Join(signedHeaders, ";"))
canonicalRequest = append(canonicalRequest, "\n")
canonicalRequest = append(canonicalRequest, payload)

_canonicalRequest := strings.Join(canonicalRequest, "")

var isSecurityToken bool
var securityToken []string
if securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_OBS]; !isSecurityToken {
securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_AMZ]
}
var query []string
if !isSecurityToken {
query = strings.Split(queryURL, "&")
for _, value := range query {
if strings.HasPrefix(value, HEADER_STS_TOKEN_AMZ+"=") || strings.HasPrefix(value, HEADER_STS_TOKEN_OBS+"=") {
if value[len(HEADER_STS_TOKEN_AMZ)+1:] != "" {
securityToken = []string{value[len(HEADER_STS_TOKEN_AMZ)+1:]}
isSecurityToken = true
}
}
}
}
logCanonicalRequest := _canonicalRequest
if isSecurityToken && len(securityToken) > 0 {
logCanonicalRequest = strings.Replace(logCanonicalRequest, securityToken[0], "******", -1)
}
doLog(LEVEL_DEBUG, "The v4 auth canonicalRequest:\n%s", logCanonicalRequest)

stringToSign := make([]string, 0, 7)
stringToSign = append(stringToSign, V4_HASH_PREFIX)
stringToSign = append(stringToSign, "\n")
stringToSign = append(stringToSign, longDate)
stringToSign = append(stringToSign, "\n")
stringToSign = append(stringToSign, scope)
stringToSign = append(stringToSign, "\n")
stringToSign = append(stringToSign, HexSha256([]byte(_canonicalRequest)))

_stringToSign := strings.Join(stringToSign, "")

doLog(LEVEL_DEBUG, "The v4 auth stringToSign:\n%s", _stringToSign)
return _stringToSign
}

func getSignedHeaders(headers map[string][]string) ([]string, map[string][]string) {
length := len(headers)
_headers := make(map[string][]string, length)
signedHeaders := make([]string, 0, length)
for key, value := range headers {
_key := strings.ToLower(strings.TrimSpace(key))
if _key != "" {
signedHeaders = append(signedHeaders, _key)
_headers[_key] = value
} else {
delete(headers, key)
}
}
sort.Strings(signedHeaders)
return signedHeaders, _headers
}

func getSignature(stringToSign, sk, region, shortDate string) string {
key := HmacSha256([]byte(V4_HASH_PRE+sk), []byte(shortDate))
key = HmacSha256(key, []byte(region))
key = HmacSha256(key, []byte(V4_SERVICE_NAME))
key = HmacSha256(key, []byte(V4_SERVICE_SUFFIX))
return Hex(HmacSha256(key, []byte(stringToSign)))
}

// V4Auth is a wrapper for v4Auth
func V4Auth(ak, sk, region, method, canonicalizedURL, queryURL string, headers map[string][]string) map[string]string {
return v4Auth(ak, sk, region, method, canonicalizedURL, queryURL, headers)
}

func v4Auth(ak, sk, region, method, canonicalizedURL, queryURL string, headers map[string][]string) map[string]string {
var t time.Time
if val, ok := headers[HEADER_DATE_AMZ]; ok {
var err error
t, err = time.Parse(LONG_DATE_FORMAT, val[0])
if err != nil {
t = time.Now().UTC()
}
} else if val, ok := headers[PARAM_DATE_AMZ_CAMEL]; ok {
var err error
t, err = time.Parse(LONG_DATE_FORMAT, val[0])
if err != nil {
t = time.Now().UTC()
}
} else if val, ok := headers[HEADER_DATE_CAMEL]; ok {
var err error
t, err = time.Parse(RFC1123_FORMAT, val[0])
if err != nil {
t = time.Now().UTC()
}
} else if val, ok := headers[strings.ToLower(HEADER_DATE_CAMEL)]; ok {
var err error
t, err = time.Parse(RFC1123_FORMAT, val[0])
if err != nil {
t = time.Now().UTC()
}
} else {
t = time.Now().UTC()
}
shortDate := t.Format(SHORT_DATE_FORMAT)
longDate := t.Format(LONG_DATE_FORMAT)

signedHeaders, _headers := getSignedHeaders(headers)

credential, scope := getCredential(ak, region, shortDate)

payload := UNSIGNED_PAYLOAD
if val, ok := headers[HEADER_CONTENT_SHA256_AMZ]; ok {
payload = val[0]
}
stringToSign := getV4StringToSign(method, canonicalizedURL, queryURL, scope, longDate, payload, signedHeaders, _headers)

signature := getSignature(stringToSign, sk, region, shortDate)

ret := make(map[string]string, 3)
ret["Credential"] = credential
ret["SignedHeaders"] = strings.Join(signedHeaders, ";")
ret["Signature"] = signature
return ret
}
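For reference, a sketch (not part of this commit) that drives the exported V4Auth wrapper above. The keys, region, and bucket host are placeholders; with no date header supplied, v4Auth falls back to time.Now().UTC() for the signing time.

package main

import (
	"fmt"

	"code.gitea.io/gitea/modules/obs"
)

func main() {
	// Lowercase header keys, as getSignedHeaders would produce.
	headers := map[string][]string{
		"host": {"testopendata.obs.cn-south-222.ai.pcl.cn"},
	}
	ret := obs.V4Auth("ak-placeholder", "sk-placeholder", "cn-south-222",
		"GET", "/testopendata/", "", headers)
	fmt.Println("Credential:   ", ret["Credential"])
	fmt.Println("SignedHeaders:", ret["SignedHeaders"])
	fmt.Println("Signature:    ", ret["Signature"])
}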

modules/obs/client.go (+1307, -0)

File diff suppressed because it is too large


modules/obs/conf.go (+471, -0)

@@ -0,0 +1,471 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

//nolint:golint, unused
package obs

import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
"time"
)

type securityProvider struct {
ak string
sk string
securityToken string
}

type urlHolder struct {
scheme string
host string
port int
}

type config struct {
securityProvider *securityProvider
urlHolder *urlHolder
pathStyle bool
cname bool
sslVerify bool
endpoint string
signature SignatureType
region string
connectTimeout int
socketTimeout int
headerTimeout int
idleConnTimeout int
finalTimeout int
maxRetryCount int
proxyURL string
maxConnsPerHost int
pemCerts []byte
transport *http.Transport
ctx context.Context
maxRedirectCount int
}

func (conf config) String() string {
return fmt.Sprintf("[endpoint:%s, signature:%s, pathStyle:%v, region:%s"+
"\nconnectTimeout:%d, socketTimeout:%dheaderTimeout:%d, idleConnTimeout:%d"+
"\nmaxRetryCount:%d, maxConnsPerHost:%d, sslVerify:%v, maxRedirectCount:%d]",
conf.endpoint, conf.signature, conf.pathStyle, conf.region,
conf.connectTimeout, conf.socketTimeout, conf.headerTimeout, conf.idleConnTimeout,
conf.maxRetryCount, conf.maxConnsPerHost, conf.sslVerify, conf.maxRedirectCount,
)
}

type configurer func(conf *config)

// WithSslVerify is a wrapper for WithSslVerifyAndPemCerts.
func WithSslVerify(sslVerify bool) configurer {
return WithSslVerifyAndPemCerts(sslVerify, nil)
}

// WithSslVerifyAndPemCerts is a configurer for ObsClient to set conf.sslVerify and conf.pemCerts.
func WithSslVerifyAndPemCerts(sslVerify bool, pemCerts []byte) configurer {
return func(conf *config) {
conf.sslVerify = sslVerify
conf.pemCerts = pemCerts
}
}

// WithHeaderTimeout is a configurer for ObsClient to set the timeout period of obtaining the response headers.
func WithHeaderTimeout(headerTimeout int) configurer {
return func(conf *config) {
conf.headerTimeout = headerTimeout
}
}

// WithProxyUrl is a configurer for ObsClient to set HTTP proxy.
func WithProxyUrl(proxyURL string) configurer {
return func(conf *config) {
conf.proxyURL = proxyURL
}
}

// WithMaxConnections is a configurer for ObsClient to set the maximum number of idle HTTP connections.
func WithMaxConnections(maxConnsPerHost int) configurer {
return func(conf *config) {
conf.maxConnsPerHost = maxConnsPerHost
}
}

// WithPathStyle is a configurer for ObsClient.
func WithPathStyle(pathStyle bool) configurer {
return func(conf *config) {
conf.pathStyle = pathStyle
}
}

// WithSignature is a configurer for ObsClient.
func WithSignature(signature SignatureType) configurer {
return func(conf *config) {
conf.signature = signature
}
}

// WithRegion is a configurer for ObsClient.
func WithRegion(region string) configurer {
return func(conf *config) {
conf.region = region
}
}

// WithConnectTimeout is a configurer for ObsClient to set timeout period for establishing
// an http/https connection, in seconds.
func WithConnectTimeout(connectTimeout int) configurer {
return func(conf *config) {
conf.connectTimeout = connectTimeout
}
}

// WithSocketTimeout is a configurer for ObsClient to set the timeout duration for transmitting data at
// the socket layer, in seconds.
func WithSocketTimeout(socketTimeout int) configurer {
return func(conf *config) {
conf.socketTimeout = socketTimeout
}
}

// WithIdleConnTimeout is a configurer for ObsClient to set the timeout period of an idle HTTP connection
// in the connection pool, in seconds.
func WithIdleConnTimeout(idleConnTimeout int) configurer {
return func(conf *config) {
conf.idleConnTimeout = idleConnTimeout
}
}

// WithMaxRetryCount is a configurer for ObsClient to set the maximum number of retries when an HTTP/HTTPS connection is abnormal.
func WithMaxRetryCount(maxRetryCount int) configurer {
return func(conf *config) {
conf.maxRetryCount = maxRetryCount
}
}

// WithSecurityToken is a configurer for ObsClient to set the security token in the temporary access keys.
func WithSecurityToken(securityToken string) configurer {
return func(conf *config) {
conf.securityProvider.securityToken = securityToken
}
}

// WithHttpTransport is a configurer for ObsClient to set the customized http Transport.
func WithHttpTransport(transport *http.Transport) configurer {
return func(conf *config) {
conf.transport = transport
}
}

// WithRequestContext is a configurer for ObsClient to set the context for each HTTP request.
func WithRequestContext(ctx context.Context) configurer {
return func(conf *config) {
conf.ctx = ctx
}
}

// WithCustomDomainName is a configurer for ObsClient.
func WithCustomDomainName(cname bool) configurer {
return func(conf *config) {
conf.cname = cname
}
}

// WithMaxRedirectCount is a configurer for ObsClient to set the maximum number of times that the request is redirected.
func WithMaxRedirectCount(maxRedirectCount int) configurer {
return func(conf *config) {
conf.maxRedirectCount = maxRedirectCount
}
}

func (conf *config) prepareConfig() {
if conf.connectTimeout <= 0 {
conf.connectTimeout = DEFAULT_CONNECT_TIMEOUT
}

if conf.socketTimeout <= 0 {
conf.socketTimeout = DEFAULT_SOCKET_TIMEOUT
}

conf.finalTimeout = conf.socketTimeout * 10

if conf.headerTimeout <= 0 {
conf.headerTimeout = DEFAULT_HEADER_TIMEOUT
}

if conf.idleConnTimeout < 0 {
conf.idleConnTimeout = DEFAULT_IDLE_CONN_TIMEOUT
}

if conf.maxRetryCount < 0 {
conf.maxRetryCount = DEFAULT_MAX_RETRY_COUNT
}

if conf.maxConnsPerHost <= 0 {
conf.maxConnsPerHost = DEFAULT_MAX_CONN_PER_HOST
}

if conf.maxRedirectCount < 0 {
conf.maxRedirectCount = DEFAULT_MAX_REDIRECT_COUNT
}
}

func (conf *config) initConfigWithDefault() error {
conf.securityProvider.ak = strings.TrimSpace(conf.securityProvider.ak)
conf.securityProvider.sk = strings.TrimSpace(conf.securityProvider.sk)
conf.securityProvider.securityToken = strings.TrimSpace(conf.securityProvider.securityToken)
conf.endpoint = strings.TrimSpace(conf.endpoint)
if conf.endpoint == "" {
return errors.New("endpoint is not set")
}

if index := strings.Index(conf.endpoint, "?"); index > 0 {
conf.endpoint = conf.endpoint[:index]
}

for strings.LastIndex(conf.endpoint, "/") == len(conf.endpoint)-1 {
conf.endpoint = conf.endpoint[:len(conf.endpoint)-1]
}

if conf.signature == "" {
conf.signature = DEFAULT_SIGNATURE
}

urlHolder := &urlHolder{}
var address string
if strings.HasPrefix(conf.endpoint, "https://") {
urlHolder.scheme = "https"
address = conf.endpoint[len("https://"):]
} else if strings.HasPrefix(conf.endpoint, "http://") {
urlHolder.scheme = "http"
address = conf.endpoint[len("http://"):]
} else {
urlHolder.scheme = "https"
address = conf.endpoint
}

addr := strings.Split(address, ":")
if len(addr) == 2 {
if port, err := strconv.Atoi(addr[1]); err == nil {
urlHolder.port = port
}
}
urlHolder.host = addr[0]
if urlHolder.port == 0 {
if urlHolder.scheme == "https" {
urlHolder.port = 443
} else {
urlHolder.port = 80
}
}

if IsIP(urlHolder.host) {
conf.pathStyle = true
}

conf.urlHolder = urlHolder

conf.region = strings.TrimSpace(conf.region)
if conf.region == "" {
conf.region = DEFAULT_REGION
}

conf.prepareConfig()
conf.proxyURL = strings.TrimSpace(conf.proxyURL)
return nil
}
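// For illustration: given the endpoint "http://192.168.1.1:8080/?x=1",
// initConfigWithDefault drops the query string and the trailing slash,
// yielding scheme "http", host "192.168.1.1" and port 8080; because the
// host is an IP address, path-style addressing is forced.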

func (conf *config) getTransport() error {
if conf.transport == nil {
conf.transport = &http.Transport{
Dial: func(network, addr string) (net.Conn, error) {
conn, err := net.DialTimeout(network, addr, time.Second*time.Duration(conf.connectTimeout))
if err != nil {
return nil, err
}
return getConnDelegate(conn, conf.socketTimeout, conf.finalTimeout), nil
},
MaxIdleConns: conf.maxConnsPerHost,
MaxIdleConnsPerHost: conf.maxConnsPerHost,
ResponseHeaderTimeout: time.Second * time.Duration(conf.headerTimeout),
IdleConnTimeout: time.Second * time.Duration(conf.idleConnTimeout),
}

if conf.proxyURL != "" {
proxyURL, err := url.Parse(conf.proxyURL)
if err != nil {
return err
}
conf.transport.Proxy = http.ProxyURL(proxyURL)
}

tlsConfig := &tls.Config{InsecureSkipVerify: !conf.sslVerify}
if conf.sslVerify && conf.pemCerts != nil {
pool := x509.NewCertPool()
pool.AppendCertsFromPEM(conf.pemCerts)
tlsConfig.RootCAs = pool
}

conf.transport.TLSClientConfig = tlsConfig
conf.transport.DisableCompression = true
}

return nil
}

// checkRedirectFunc stops the HTTP client from following redirects
// automatically; redirect responses are returned to the caller so they can
// be handled by the SDK's own logic (bounded by maxRedirectCount).
func checkRedirectFunc(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}

// DummyQueryEscape returns the input string unchanged (a no-op escape function).
func DummyQueryEscape(s string) string {
return s
}

func (conf *config) prepareBaseURL(bucketName string) (requestURL string, canonicalizedURL string) {
urlHolder := conf.urlHolder
if conf.cname {
requestURL = fmt.Sprintf("%s://%s:%d", urlHolder.scheme, urlHolder.host, urlHolder.port)
if conf.signature == "v4" {
canonicalizedURL = "/"
} else {
canonicalizedURL = "/" + urlHolder.host + "/"
}
} else {
if bucketName == "" {
requestURL = fmt.Sprintf("%s://%s:%d", urlHolder.scheme, urlHolder.host, urlHolder.port)
canonicalizedURL = "/"
} else {
if conf.pathStyle {
requestURL = fmt.Sprintf("%s://%s:%d/%s", urlHolder.scheme, urlHolder.host, urlHolder.port, bucketName)
canonicalizedURL = "/" + bucketName
} else {
requestURL = fmt.Sprintf("%s://%s.%s:%d", urlHolder.scheme, bucketName, urlHolder.host, urlHolder.port)
if conf.signature == "v2" || conf.signature == "OBS" {
canonicalizedURL = "/" + bucketName + "/"
} else {
canonicalizedURL = "/"
}
}
}
}
return
}
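// Illustrative URL forms for bucket "b" on endpoint "https://obs.example.com":
//   path-style:   requestURL "https://obs.example.com:443/b", canonicalizedURL "/b"
//   virtual-host: requestURL "https://b.obs.example.com:443", canonicalizedURL "/b/" (v2/OBS) or "/" (v4)
//   cname:        requestURL "https://obs.example.com:443",   canonicalizedURL "/obs.example.com/" (v2/OBS) or "/" (v4)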

func (conf *config) prepareObjectKey(escape bool, objectKey string, escapeFunc func(s string) string) (encodeObjectKey string) {
if escape {
tempKey := []rune(objectKey)
result := make([]string, 0, len(tempKey))
for _, value := range tempKey {
if string(value) == "/" {
result = append(result, string(value))
} else {
if string(value) == " " {
result = append(result, url.PathEscape(string(value)))
} else {
result = append(result, url.QueryEscape(string(value)))
}
}
}
encodeObjectKey = strings.Join(result, "")
} else {
encodeObjectKey = escapeFunc(objectKey)
}
return
}
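// Example: with escape enabled, the key "a b/c" becomes "a%20b/c": slashes
// are kept as path separators, and spaces use url.PathEscape ("%20") because
// url.QueryEscape would encode them as "+", which is not valid in a path.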

func (conf *config) prepareEscapeFunc(escape bool) (escapeFunc func(s string) string) {
if escape {
return url.QueryEscape
}
return DummyQueryEscape
}

func (conf *config) formatUrls(bucketName, objectKey string, params map[string]string, escape bool) (requestURL string, canonicalizedURL string) {
requestURL, canonicalizedURL = conf.prepareBaseURL(bucketName)
escapeFunc := conf.prepareEscapeFunc(escape)

if objectKey != "" {
encodeObjectKey := conf.prepareObjectKey(escape, objectKey, escapeFunc)
requestURL += "/" + encodeObjectKey
if !strings.HasSuffix(canonicalizedURL, "/") {
canonicalizedURL += "/"
}
canonicalizedURL += encodeObjectKey
}

keys := make([]string, 0, len(params))
for key := range params {
keys = append(keys, strings.TrimSpace(key))
}
sort.Strings(keys)
i := 0

for index, key := range keys {
if index == 0 {
requestURL += "?"
} else {
requestURL += "&"
}
_key := url.QueryEscape(key)
requestURL += _key

_value := params[key]
if conf.signature == "v4" {
requestURL += "=" + url.QueryEscape(_value)
} else {
if _value != "" {
requestURL += "=" + url.QueryEscape(_value)
_value = "=" + _value
} else {
_value = ""
}
lowerKey := strings.ToLower(key)
_, ok := allowedResourceParameterNames[lowerKey]
prefixHeader := HEADER_PREFIX
isObs := conf.signature == SignatureObs
if isObs {
prefixHeader = HEADER_PREFIX_OBS
}
ok = ok || strings.HasPrefix(lowerKey, prefixHeader)
if ok {
if i == 0 {
canonicalizedURL += "?"
} else {
canonicalizedURL += "&"
}
canonicalizedURL += getQueryURL(_key, _value)
i++
}
}
}
return
}

func getQueryURL(key, value string) string {
return key + value
}
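// Illustrative formatUrls result for bucket "b", key "a b/c" and params
// {"acl": "", "x": "y"} with the default v2 signature and path-style URLs:
//   requestURL       https://host:443/b/a%20b/c?acl&x=y
//   canonicalizedURL /b/a%20b/c?acl
// "x" is not in allowedResourceParameterNames, so it appears only in the
// request URL and is excluded from the string to sign.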

+ 932
- 0
modules/obs/const.go View File

@@ -0,0 +1,932 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

//nolint:golint, unused
package obs

const (
obsSdkVersion = "3.20.9"
USER_AGENT = "obs-sdk-go/" + obsSdkVersion
HEADER_PREFIX = "x-amz-"
HEADER_PREFIX_META = "x-amz-meta-"
HEADER_PREFIX_OBS = "x-obs-"
HEADER_PREFIX_META_OBS = "x-obs-meta-"
HEADER_DATE_AMZ = "x-amz-date"
HEADER_DATE_OBS = "x-obs-date"
HEADER_STS_TOKEN_AMZ = "x-amz-security-token"
HEADER_STS_TOKEN_OBS = "x-obs-security-token"
HEADER_ACCESSS_KEY_AMZ = "AWSAccessKeyId"
PREFIX_META = "meta-"

HEADER_CONTENT_SHA256_AMZ = "x-amz-content-sha256"
HEADER_ACL_AMZ = "x-amz-acl"
HEADER_ACL_OBS = "x-obs-acl"
HEADER_ACL = "acl"
HEADER_LOCATION_AMZ = "location"
HEADER_BUCKET_LOCATION_OBS = "bucket-location"
HEADER_COPY_SOURCE = "copy-source"
HEADER_COPY_SOURCE_RANGE = "copy-source-range"
HEADER_RANGE = "Range"
HEADER_STORAGE_CLASS = "x-default-storage-class"
HEADER_STORAGE_CLASS_OBS = "x-obs-storage-class"
HEADER_VERSION_OBS = "version"
HEADER_GRANT_READ_OBS = "grant-read"
HEADER_GRANT_WRITE_OBS = "grant-write"
HEADER_GRANT_READ_ACP_OBS = "grant-read-acp"
HEADER_GRANT_WRITE_ACP_OBS = "grant-write-acp"
HEADER_GRANT_FULL_CONTROL_OBS = "grant-full-control"
HEADER_GRANT_READ_DELIVERED_OBS = "grant-read-delivered"
HEADER_GRANT_FULL_CONTROL_DELIVERED_OBS = "grant-full-control-delivered"
HEADER_REQUEST_ID = "request-id"
HEADER_BUCKET_REGION = "bucket-region"
HEADER_ACCESS_CONRTOL_ALLOW_ORIGIN = "access-control-allow-origin"
HEADER_ACCESS_CONRTOL_ALLOW_HEADERS = "access-control-allow-headers"
HEADER_ACCESS_CONRTOL_MAX_AGE = "access-control-max-age"
HEADER_ACCESS_CONRTOL_ALLOW_METHODS = "access-control-allow-methods"
HEADER_ACCESS_CONRTOL_EXPOSE_HEADERS = "access-control-expose-headers"
HEADER_EPID_HEADERS = "epid"
HEADER_VERSION_ID = "version-id"
HEADER_COPY_SOURCE_VERSION_ID = "copy-source-version-id"
HEADER_DELETE_MARKER = "delete-marker"
HEADER_WEBSITE_REDIRECT_LOCATION = "website-redirect-location"
HEADER_METADATA_DIRECTIVE = "metadata-directive"
HEADER_EXPIRATION = "expiration"
HEADER_EXPIRES_OBS = "x-obs-expires"
HEADER_RESTORE = "restore"
HEADER_OBJECT_TYPE = "object-type"
HEADER_NEXT_APPEND_POSITION = "next-append-position"
HEADER_STORAGE_CLASS2 = "storage-class"
HEADER_CONTENT_LENGTH = "content-length"
HEADER_CONTENT_TYPE = "content-type"
HEADER_CONTENT_LANGUAGE = "content-language"
HEADER_EXPIRES = "expires"
HEADER_CACHE_CONTROL = "cache-control"
HEADER_CONTENT_DISPOSITION = "content-disposition"
HEADER_CONTENT_ENCODING = "content-encoding"
HEADER_AZ_REDUNDANCY = "az-redundancy"
headerOefMarker = "oef-marker"

HEADER_ETAG = "etag"
HEADER_LASTMODIFIED = "last-modified"

HEADER_COPY_SOURCE_IF_MATCH = "copy-source-if-match"
HEADER_COPY_SOURCE_IF_NONE_MATCH = "copy-source-if-none-match"
HEADER_COPY_SOURCE_IF_MODIFIED_SINCE = "copy-source-if-modified-since"
HEADER_COPY_SOURCE_IF_UNMODIFIED_SINCE = "copy-source-if-unmodified-since"

HEADER_IF_MATCH = "If-Match"
HEADER_IF_NONE_MATCH = "If-None-Match"
HEADER_IF_MODIFIED_SINCE = "If-Modified-Since"
HEADER_IF_UNMODIFIED_SINCE = "If-Unmodified-Since"

HEADER_SSEC_ENCRYPTION = "server-side-encryption-customer-algorithm"
HEADER_SSEC_KEY = "server-side-encryption-customer-key"
HEADER_SSEC_KEY_MD5 = "server-side-encryption-customer-key-MD5"

HEADER_SSEKMS_ENCRYPTION = "server-side-encryption"
HEADER_SSEKMS_KEY = "server-side-encryption-aws-kms-key-id"
HEADER_SSEKMS_ENCRYPT_KEY_OBS = "server-side-encryption-kms-key-id"

HEADER_SSEC_COPY_SOURCE_ENCRYPTION = "copy-source-server-side-encryption-customer-algorithm"
HEADER_SSEC_COPY_SOURCE_KEY = "copy-source-server-side-encryption-customer-key"
HEADER_SSEC_COPY_SOURCE_KEY_MD5 = "copy-source-server-side-encryption-customer-key-MD5"

HEADER_SSEKMS_KEY_AMZ = "x-amz-server-side-encryption-aws-kms-key-id"

HEADER_SSEKMS_KEY_OBS = "x-obs-server-side-encryption-kms-key-id"

HEADER_SUCCESS_ACTION_REDIRECT = "success_action_redirect"

HEADER_DATE_CAMEL = "Date"
HEADER_HOST_CAMEL = "Host"
HEADER_HOST = "host"
HEADER_AUTH_CAMEL = "Authorization"
HEADER_MD5_CAMEL = "Content-MD5"
HEADER_LOCATION_CAMEL = "Location"
HEADER_CONTENT_LENGTH_CAMEL = "Content-Length"
HEADER_CONTENT_TYPE_CAML = "Content-Type"
HEADER_USER_AGENT_CAMEL = "User-Agent"
HEADER_ORIGIN_CAMEL = "Origin"
HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL = "Access-Control-Request-Headers"
HEADER_CACHE_CONTROL_CAMEL = "Cache-Control"
HEADER_CONTENT_DISPOSITION_CAMEL = "Content-Disposition"
HEADER_CONTENT_ENCODING_CAMEL = "Content-Encoding"
HEADER_CONTENT_LANGUAGE_CAMEL = "Content-Language"
HEADER_EXPIRES_CAMEL = "Expires"

PARAM_VERSION_ID = "versionId"
PARAM_RESPONSE_CONTENT_TYPE = "response-content-type"
PARAM_RESPONSE_CONTENT_LANGUAGE = "response-content-language"
PARAM_RESPONSE_EXPIRES = "response-expires"
PARAM_RESPONSE_CACHE_CONTROL = "response-cache-control"
PARAM_RESPONSE_CONTENT_DISPOSITION = "response-content-disposition"
PARAM_RESPONSE_CONTENT_ENCODING = "response-content-encoding"
PARAM_IMAGE_PROCESS = "x-image-process"

PARAM_ALGORITHM_AMZ_CAMEL = "X-Amz-Algorithm"
PARAM_CREDENTIAL_AMZ_CAMEL = "X-Amz-Credential"
PARAM_DATE_AMZ_CAMEL = "X-Amz-Date"
PARAM_DATE_OBS_CAMEL = "X-Obs-Date"
PARAM_EXPIRES_AMZ_CAMEL = "X-Amz-Expires"
PARAM_SIGNEDHEADERS_AMZ_CAMEL = "X-Amz-SignedHeaders"
PARAM_SIGNATURE_AMZ_CAMEL = "X-Amz-Signature"

DEFAULT_SIGNATURE = SignatureV2
DEFAULT_REGION = "region"
DEFAULT_CONNECT_TIMEOUT = 60
DEFAULT_SOCKET_TIMEOUT = 60
DEFAULT_HEADER_TIMEOUT = 60
DEFAULT_IDLE_CONN_TIMEOUT = 30
DEFAULT_MAX_RETRY_COUNT = 3
DEFAULT_MAX_REDIRECT_COUNT = 3
DEFAULT_MAX_CONN_PER_HOST = 1000
EMPTY_CONTENT_SHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
UNSIGNED_PAYLOAD = "UNSIGNED-PAYLOAD"
LONG_DATE_FORMAT = "20060102T150405Z"
SHORT_DATE_FORMAT = "20060102"
ISO8601_DATE_FORMAT = "2006-01-02T15:04:05Z"
ISO8601_MIDNIGHT_DATE_FORMAT = "2006-01-02T00:00:00Z"
RFC1123_FORMAT = "Mon, 02 Jan 2006 15:04:05 GMT"

V4_SERVICE_NAME = "s3"
V4_SERVICE_SUFFIX = "aws4_request"

V2_HASH_PREFIX = "AWS"
OBS_HASH_PREFIX = "OBS"

V4_HASH_PREFIX = "AWS4-HMAC-SHA256"
V4_HASH_PRE = "AWS4"

DEFAULT_SSE_KMS_ENCRYPTION = "aws:kms"
DEFAULT_SSE_KMS_ENCRYPTION_OBS = "kms"

DEFAULT_SSE_C_ENCRYPTION = "AES256"

HTTP_GET = "GET"
HTTP_POST = "POST"
HTTP_PUT = "PUT"
HTTP_DELETE = "DELETE"
HTTP_HEAD = "HEAD"
HTTP_OPTIONS = "OPTIONS"

REQUEST_PAYER = "request-payer"
MULTI_AZ = "3az"

MAX_PART_SIZE = 5 * 1024 * 1024 * 1024 // 5 GiB: largest allowed part
MIN_PART_SIZE = 100 * 1024 // 100 KiB: smallest allowed part
DEFAULT_PART_SIZE = 9 * 1024 * 1024 // 9 MiB: default part size for transfers
MAX_PART_NUM = 10000 // maximum number of parts in a multipart upload
)

// SignatureType defines type of signature
type SignatureType string

const (
// SignatureV2 signature type v2
SignatureV2 SignatureType = "v2"
// SignatureV4 signature type v4
SignatureV4 SignatureType = "v4"
// SignatureObs signature type OBS
SignatureObs SignatureType = "OBS"
)

var (
interestedHeaders = []string{"content-md5", "content-type", "date"}

allowedRequestHTTPHeaderMetadataNames = map[string]bool{
"content-type": true,
"content-md5": true,
"content-length": true,
"content-language": true,
"expires": true,
"origin": true,
"cache-control": true,
"content-disposition": true,
"content-encoding": true,
"access-control-request-method": true,
"access-control-request-headers": true,
"x-default-storage-class": true,
"location": true,
"date": true,
"etag": true,
"range": true,
"host": true,
"if-modified-since": true,
"if-unmodified-since": true,
"if-match": true,
"if-none-match": true,
"last-modified": true,
"content-range": true,
}

allowedResourceParameterNames = map[string]bool{
"acl": true,
"backtosource": true,
"metadata": true,
"policy": true,
"torrent": true,
"logging": true,
"location": true,
"storageinfo": true,
"quota": true,
"storageclass": true,
"storagepolicy": true,
"requestpayment": true,
"versions": true,
"versioning": true,
"versionid": true,
"uploads": true,
"uploadid": true,
"partnumber": true,
"website": true,
"notification": true,
"lifecycle": true,
"deletebucket": true,
"delete": true,
"cors": true,
"restore": true,
"tagging": true,
"append": true,
"position": true,
"replication": true,
"response-content-type": true,
"response-content-language": true,
"response-expires": true,
"response-cache-control": true,
"response-content-disposition": true,
"response-content-encoding": true,
"x-image-process": true,
"x-oss-process": true,
"x-image-save-bucket": true,
"x-image-save-object": true,
"ignore-sign-in-query": true,
}

mimeTypes = map[string]string{
"001": "application/x-001",
"301": "application/x-301",
"323": "text/h323",
"7z": "application/x-7z-compressed",
"906": "application/x-906",
"907": "drawing/907",
"IVF": "video/x-ivf",
"a11": "application/x-a11",
"aac": "audio/x-aac",
"acp": "audio/x-mei-aac",
"ai": "application/postscript",
"aif": "audio/aiff",
"aifc": "audio/aiff",
"aiff": "audio/aiff",
"anv": "application/x-anv",
"apk": "application/vnd.android.package-archive",
"asa": "text/asa",
"asf": "video/x-ms-asf",
"asp": "text/asp",
"asx": "video/x-ms-asf",
"atom": "application/atom+xml",
"au": "audio/basic",
"avi": "video/avi",
"awf": "application/vnd.adobe.workflow",
"biz": "text/xml",
"bmp": "application/x-bmp",
"bot": "application/x-bot",
"bz2": "application/x-bzip2",
"c4t": "application/x-c4t",
"c90": "application/x-c90",
"cal": "application/x-cals",
"cat": "application/vnd.ms-pki.seccat",
"cdf": "application/x-netcdf",
"cdr": "application/x-cdr",
"cel": "application/x-cel",
"cer": "application/x-x509-ca-cert",
"cg4": "application/x-g4",
"cgm": "application/x-cgm",
"cit": "application/x-cit",
"class": "java/*",
"cml": "text/xml",
"cmp": "application/x-cmp",
"cmx": "application/x-cmx",
"cot": "application/x-cot",
"crl": "application/pkix-crl",
"crt": "application/x-x509-ca-cert",
"csi": "application/x-csi",
"css": "text/css",
"csv": "text/csv",
"cu": "application/cu-seeme",
"cut": "application/x-cut",
"dbf": "application/x-dbf",
"dbm": "application/x-dbm",
"dbx": "application/x-dbx",
"dcd": "text/xml",
"dcx": "application/x-dcx",
"deb": "application/x-debian-package",
"der": "application/x-x509-ca-cert",
"dgn": "application/x-dgn",
"dib": "application/x-dib",
"dll": "application/x-msdownload",
"doc": "application/msword",
"docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
"dot": "application/msword",
"drw": "application/x-drw",
"dtd": "text/xml",
"dvi": "application/x-dvi",
"dwf": "application/x-dwf",
"dwg": "application/x-dwg",
"dxb": "application/x-dxb",
"dxf": "application/x-dxf",
"edn": "application/vnd.adobe.edn",
"emf": "application/x-emf",
"eml": "message/rfc822",
"ent": "text/xml",
"eot": "application/vnd.ms-fontobject",
"epi": "application/x-epi",
"eps": "application/postscript",
"epub": "application/epub+zip",
"etd": "application/x-ebx",
"etx": "text/x-setext",
"exe": "application/x-msdownload",
"fax": "image/fax",
"fdf": "application/vnd.fdf",
"fif": "application/fractals",
"flac": "audio/flac",
"flv": "video/x-flv",
"fo": "text/xml",
"frm": "application/x-frm",
"g4": "application/x-g4",
"gbr": "application/x-gbr",
"gif": "image/gif",
"gl2": "application/x-gl2",
"gp4": "application/x-gp4",
"gz": "application/gzip",
"hgl": "application/x-hgl",
"hmr": "application/x-hmr",
"hpg": "application/x-hpgl",
"hpl": "application/x-hpl",
"hqx": "application/mac-binhex40",
"hrf": "application/x-hrf",
"hta": "application/hta",
"htc": "text/x-component",
"htm": "text/html",
"html": "text/html",
"htt": "text/webviewhtml",
"htx": "text/html",
"icb": "application/x-icb",
"ico": "application/x-ico",
"ics": "text/calendar",
"iff": "application/x-iff",
"ig4": "application/x-g4",
"igs": "application/x-igs",
"iii": "application/x-iphone",
"img": "application/x-img",
"ini": "text/plain",
"ins": "application/x-internet-signup",
"ipa": "application/vnd.iphone",
"iso": "application/x-iso9660-image",
"isp": "application/x-internet-signup",
"jar": "application/java-archive",
"java": "java/*",
"jfif": "image/jpeg",
"jpe": "image/jpeg",
"jpeg": "image/jpeg",
"jpg": "image/jpeg",
"js": "application/x-javascript",
"json": "application/json",
"jsp": "text/html",
"la1": "audio/x-liquid-file",
"lar": "application/x-laplayer-reg",
"latex": "application/x-latex",
"lavs": "audio/x-liquid-secure",
"lbm": "application/x-lbm",
"lmsff": "audio/x-la-lms",
"log": "text/plain",
"ls": "application/x-javascript",
"ltr": "application/x-ltr",
"m1v": "video/x-mpeg",
"m2v": "video/x-mpeg",
"m3u": "audio/mpegurl",
"m4a": "audio/mp4",
"m4e": "video/mpeg4",
"m4v": "video/mp4",
"mac": "application/x-mac",
"man": "application/x-troff-man",
"math": "text/xml",
"mdb": "application/msaccess",
"mfp": "application/x-shockwave-flash",
"mht": "message/rfc822",
"mhtml": "message/rfc822",
"mi": "application/x-mi",
"mid": "audio/mid",
"midi": "audio/mid",
"mil": "application/x-mil",
"mml": "text/xml",
"mnd": "audio/x-musicnet-download",
"mns": "audio/x-musicnet-stream",
"mocha": "application/x-javascript",
"mov": "video/quicktime",
"movie": "video/x-sgi-movie",
"mp1": "audio/mp1",
"mp2": "audio/mp2",
"mp2v": "video/mpeg",
"mp3": "audio/mp3",
"mp4": "video/mp4",
"mp4a": "audio/mp4",
"mp4v": "video/mp4",
"mpa": "video/x-mpg",
"mpd": "application/vnd.ms-project",
"mpe": "video/mpeg",
"mpeg": "video/mpeg",
"mpg": "video/mpeg",
"mpg4": "video/mp4",
"mpga": "audio/rn-mpeg",
"mpp": "application/vnd.ms-project",
"mps": "video/x-mpeg",
"mpt": "application/vnd.ms-project",
"mpv": "video/mpg",
"mpv2": "video/mpeg",
"mpw": "application/vnd.ms-project",
"mpx": "application/vnd.ms-project",
"mtx": "text/xml",
"mxp": "application/x-mmxp",
"net": "image/pnetvue",
"nrf": "application/x-nrf",
"nws": "message/rfc822",
"odc": "text/x-ms-odc",
"oga": "audio/ogg",
"ogg": "audio/ogg",
"ogv": "video/ogg",
"ogx": "application/ogg",
"out": "application/x-out",
"p10": "application/pkcs10",
"p12": "application/x-pkcs12",
"p7b": "application/x-pkcs7-certificates",
"p7c": "application/pkcs7-mime",
"p7m": "application/pkcs7-mime",
"p7r": "application/x-pkcs7-certreqresp",
"p7s": "application/pkcs7-signature",
"pbm": "image/x-portable-bitmap",
"pc5": "application/x-pc5",
"pci": "application/x-pci",
"pcl": "application/x-pcl",
"pcx": "application/x-pcx",
"pdf": "application/pdf",
"pdx": "application/vnd.adobe.pdx",
"pfx": "application/x-pkcs12",
"pgl": "application/x-pgl",
"pgm": "image/x-portable-graymap",
"pic": "application/x-pic",
"pko": "application/vnd.ms-pki.pko",
"pl": "application/x-perl",
"plg": "text/html",
"pls": "audio/scpls",
"plt": "application/x-plt",
"png": "image/png",
"pnm": "image/x-portable-anymap",
"pot": "application/vnd.ms-powerpoint",
"ppa": "application/vnd.ms-powerpoint",
"ppm": "application/x-ppm",
"pps": "application/vnd.ms-powerpoint",
"ppt": "application/vnd.ms-powerpoint",
"pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
"pr": "application/x-pr",
"prf": "application/pics-rules",
"prn": "application/x-prn",
"prt": "application/x-prt",
"ps": "application/postscript",
"ptn": "application/x-ptn",
"pwz": "application/vnd.ms-powerpoint",
"qt": "video/quicktime",
"r3t": "text/vnd.rn-realtext3d",
"ra": "audio/vnd.rn-realaudio",
"ram": "audio/x-pn-realaudio",
"rar": "application/x-rar-compressed",
"ras": "application/x-ras",
"rat": "application/rat-file",
"rdf": "text/xml",
"rec": "application/vnd.rn-recording",
"red": "application/x-red",
"rgb": "application/x-rgb",
"rjs": "application/vnd.rn-realsystem-rjs",
"rjt": "application/vnd.rn-realsystem-rjt",
"rlc": "application/x-rlc",
"rle": "application/x-rle",
"rm": "application/vnd.rn-realmedia",
"rmf": "application/vnd.adobe.rmf",
"rmi": "audio/mid",
"rmj": "application/vnd.rn-realsystem-rmj",
"rmm": "audio/x-pn-realaudio",
"rmp": "application/vnd.rn-rn_music_package",
"rms": "application/vnd.rn-realmedia-secure",
"rmvb": "application/vnd.rn-realmedia-vbr",
"rmx": "application/vnd.rn-realsystem-rmx",
"rnx": "application/vnd.rn-realplayer",
"rp": "image/vnd.rn-realpix",
"rpm": "audio/x-pn-realaudio-plugin",
"rsml": "application/vnd.rn-rsml",
"rss": "application/rss+xml",
"rt": "text/vnd.rn-realtext",
"rtf": "application/x-rtf",
"rv": "video/vnd.rn-realvideo",
"sam": "application/x-sam",
"sat": "application/x-sat",
"sdp": "application/sdp",
"sdw": "application/x-sdw",
"sgm": "text/sgml",
"sgml": "text/sgml",
"sis": "application/vnd.symbian.install",
"sisx": "application/vnd.symbian.install",
"sit": "application/x-stuffit",
"slb": "application/x-slb",
"sld": "application/x-sld",
"slk": "drawing/x-slk",
"smi": "application/smil",
"smil": "application/smil",
"smk": "application/x-smk",
"snd": "audio/basic",
"sol": "text/plain",
"sor": "text/plain",
"spc": "application/x-pkcs7-certificates",
"spl": "application/futuresplash",
"spp": "text/xml",
"ssm": "application/streamingmedia",
"sst": "application/vnd.ms-pki.certstore",
"stl": "application/vnd.ms-pki.stl",
"stm": "text/html",
"sty": "application/x-sty",
"svg": "image/svg+xml",
"swf": "application/x-shockwave-flash",
"tar": "application/x-tar",
"tdf": "application/x-tdf",
"tg4": "application/x-tg4",
"tga": "application/x-tga",
"tif": "image/tiff",
"tiff": "image/tiff",
"tld": "text/xml",
"top": "drawing/x-top",
"torrent": "application/x-bittorrent",
"tsd": "text/xml",
"ttf": "application/x-font-ttf",
"txt": "text/plain",
"uin": "application/x-icq",
"uls": "text/iuls",
"vcf": "text/x-vcard",
"vda": "application/x-vda",
"vdx": "application/vnd.visio",
"vml": "text/xml",
"vpg": "application/x-vpeg005",
"vsd": "application/vnd.visio",
"vss": "application/vnd.visio",
"vst": "application/x-vst",
"vsw": "application/vnd.visio",
"vsx": "application/vnd.visio",
"vtx": "application/vnd.visio",
"vxml": "text/xml",
"wav": "audio/wav",
"wax": "audio/x-ms-wax",
"wb1": "application/x-wb1",
"wb2": "application/x-wb2",
"wb3": "application/x-wb3",
"wbmp": "image/vnd.wap.wbmp",
"webm": "video/webm",
"wiz": "application/msword",
"wk3": "application/x-wk3",
"wk4": "application/x-wk4",
"wkq": "application/x-wkq",
"wks": "application/x-wks",
"wm": "video/x-ms-wm",
"wma": "audio/x-ms-wma",
"wmd": "application/x-ms-wmd",
"wmf": "application/x-wmf",
"wml": "text/vnd.wap.wml",
"wmv": "video/x-ms-wmv",
"wmx": "video/x-ms-wmx",
"wmz": "application/x-ms-wmz",
"woff": "application/x-font-woff",
"wp6": "application/x-wp6",
"wpd": "application/x-wpd",
"wpg": "application/x-wpg",
"wpl": "application/vnd.ms-wpl",
"wq1": "application/x-wq1",
"wr1": "application/x-wr1",
"wri": "application/x-wri",
"wrk": "application/x-wrk",
"ws": "application/x-ws",
"ws2": "application/x-ws",
"wsc": "text/scriptlet",
"wsdl": "text/xml",
"wvx": "video/x-ms-wvx",
"x_b": "application/x-x_b",
"x_t": "application/x-x_t",
"xap": "application/x-silverlight-app",
"xbm": "image/x-xbitmap",
"xdp": "application/vnd.adobe.xdp",
"xdr": "text/xml",
"xfd": "application/vnd.adobe.xfd",
"xfdf": "application/vnd.adobe.xfdf",
"xhtml": "text/html",
"xls": "application/vnd.ms-excel",
"xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"xlw": "application/x-xlw",
"xml": "text/xml",
"xpl": "audio/scpls",
"xpm": "image/x-xpixmap",
"xq": "text/xml",
"xql": "text/xml",
"xquery": "text/xml",
"xsd": "text/xml",
"xsl": "text/xml",
"xslt": "text/xml",
"xwd": "application/x-xwd",
"yaml": "text/yaml",
"yml": "text/yaml",
"zip": "application/zip",
}
)

// HttpMethodType defines http method type
type HttpMethodType string

const (
HttpMethodGet HttpMethodType = HTTP_GET
HttpMethodPut HttpMethodType = HTTP_PUT
HttpMethodPost HttpMethodType = HTTP_POST
HttpMethodDelete HttpMethodType = HTTP_DELETE
HttpMethodHead HttpMethodType = HTTP_HEAD
HttpMethodOptions HttpMethodType = HTTP_OPTIONS
)

// SubResourceType defines the subResource value
type SubResourceType string

const (
// SubResourceStoragePolicy subResource value: storagePolicy
SubResourceStoragePolicy SubResourceType = "storagePolicy"

// SubResourceStorageClass subResource value: storageClass
SubResourceStorageClass SubResourceType = "storageClass"

// SubResourceQuota subResource value: quota
SubResourceQuota SubResourceType = "quota"

// SubResourceStorageInfo subResource value: storageinfo
SubResourceStorageInfo SubResourceType = "storageinfo"

// SubResourceLocation subResource value: location
SubResourceLocation SubResourceType = "location"

// SubResourceAcl subResource value: acl
SubResourceAcl SubResourceType = "acl"

// SubResourcePolicy subResource value: policy
SubResourcePolicy SubResourceType = "policy"

// SubResourceCors subResource value: cors
SubResourceCors SubResourceType = "cors"

// SubResourceVersioning subResource value: versioning
SubResourceVersioning SubResourceType = "versioning"

// SubResourceWebsite subResource value: website
SubResourceWebsite SubResourceType = "website"

// SubResourceLogging subResource value: logging
SubResourceLogging SubResourceType = "logging"

// SubResourceLifecycle subResource value: lifecycle
SubResourceLifecycle SubResourceType = "lifecycle"

// SubResourceNotification subResource value: notification
SubResourceNotification SubResourceType = "notification"

// SubResourceTagging subResource value: tagging
SubResourceTagging SubResourceType = "tagging"

// SubResourceDelete subResource value: delete
SubResourceDelete SubResourceType = "delete"

// SubResourceVersions subResource value: versions
SubResourceVersions SubResourceType = "versions"

// SubResourceUploads subResource value: uploads
SubResourceUploads SubResourceType = "uploads"

// SubResourceRestore subResource value: restore
SubResourceRestore SubResourceType = "restore"

// SubResourceMetadata subResource value: metadata
SubResourceMetadata SubResourceType = "metadata"

// SubResourceRequestPayment subResource value: requestPayment
SubResourceRequestPayment SubResourceType = "requestPayment"
)

// objectKeyType defines the objectKey value
type objectKeyType string

const (
// objectKeyExtensionPolicy objectKey value: v1/extension_policy
objectKeyExtensionPolicy objectKeyType = "v1/extension_policy"

// objectKeyAsyncFetchJob objectKey value: v1/async-fetch/jobs
objectKeyAsyncFetchJob objectKeyType = "v1/async-fetch/jobs"
)

// AclType defines bucket/object acl type
type AclType string

const (
AclPrivate AclType = "private"
AclPublicRead AclType = "public-read"
AclPublicReadWrite AclType = "public-read-write"
AclAuthenticatedRead AclType = "authenticated-read"
AclBucketOwnerRead AclType = "bucket-owner-read"
AclBucketOwnerFullControl AclType = "bucket-owner-full-control"
AclLogDeliveryWrite AclType = "log-delivery-write"
AclPublicReadDelivery AclType = "public-read-delivered"
AclPublicReadWriteDelivery AclType = "public-read-write-delivered"
)

// StorageClassType defines bucket storage class
type StorageClassType string

const (
// StorageClassStandard storage class: STANDARD
StorageClassStandard StorageClassType = "STANDARD"

// StorageClassWarm storage class: WARM
StorageClassWarm StorageClassType = "WARM"

// StorageClassCold storage class: COLD
StorageClassCold StorageClassType = "COLD"

storageClassStandardIA StorageClassType = "STANDARD_IA"
storageClassGlacier StorageClassType = "GLACIER"
)
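// STANDARD_IA and GLACIER are the S3-style spellings of WARM and COLD; the
// converters in convert.go translate between the two families (see
// ParseStringToStorageClassType and convertTransitionsToXML).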

// PermissionType defines permission type
type PermissionType string

const (
// PermissionRead permission type: READ
PermissionRead PermissionType = "READ"

// PermissionWrite permission type: WRITE
PermissionWrite PermissionType = "WRITE"

// PermissionReadAcp permission type: READ_ACP
PermissionReadAcp PermissionType = "READ_ACP"

// PermissionWriteAcp permission type: WRITE_ACP
PermissionWriteAcp PermissionType = "WRITE_ACP"

// PermissionFullControl permission type: FULL_CONTROL
PermissionFullControl PermissionType = "FULL_CONTROL"
)

// GranteeType defines grantee type
type GranteeType string

const (
// GranteeGroup grantee type: Group
GranteeGroup GranteeType = "Group"

// GranteeUser grantee type: CanonicalUser
GranteeUser GranteeType = "CanonicalUser"
)

// GroupUriType defines grantee uri type
type GroupUriType string

const (
// GroupAllUsers grantee uri type: AllUsers
GroupAllUsers GroupUriType = "AllUsers"

// GroupAuthenticatedUsers grantee uri type: AuthenticatedUsers
GroupAuthenticatedUsers GroupUriType = "AuthenticatedUsers"

// GroupLogDelivery grantee uri type: LogDelivery
GroupLogDelivery GroupUriType = "LogDelivery"
)

// VersioningStatusType defines bucket version status
type VersioningStatusType string

const (
// VersioningStatusEnabled version status: Enabled
VersioningStatusEnabled VersioningStatusType = "Enabled"

// VersioningStatusSuspended version status: Suspended
VersioningStatusSuspended VersioningStatusType = "Suspended"
)

// ProtocolType defines protocol type
type ProtocolType string

const (
// ProtocolHttp protocol type: http
ProtocolHttp ProtocolType = "http"

// ProtocolHttps protocol type: https
ProtocolHttps ProtocolType = "https"
)

// RuleStatusType defines lifeCycle rule status
type RuleStatusType string

const (
// RuleStatusEnabled rule status: Enabled
RuleStatusEnabled RuleStatusType = "Enabled"

// RuleStatusDisabled rule status: Disabled
RuleStatusDisabled RuleStatusType = "Disabled"
)

// RestoreTierType defines restore options
type RestoreTierType string

const (
// RestoreTierExpedited restore options: Expedited
RestoreTierExpedited RestoreTierType = "Expedited"

// RestoreTierStandard restore options: Standard
RestoreTierStandard RestoreTierType = "Standard"

// RestoreTierBulk restore options: Bulk
RestoreTierBulk RestoreTierType = "Bulk"
)

// MetadataDirectiveType defines metadata operation indicator
type MetadataDirectiveType string

const (
// CopyMetadata metadata operation: COPY
CopyMetadata MetadataDirectiveType = "COPY"

// ReplaceNew metadata operation: REPLACE_NEW
ReplaceNew MetadataDirectiveType = "REPLACE_NEW"

// ReplaceMetadata metadata operation: REPLACE
ReplaceMetadata MetadataDirectiveType = "REPLACE"
)

// EventType defines bucket notification type of events
type EventType string

const (
// ObjectCreatedAll type of events: ObjectCreated:*
ObjectCreatedAll EventType = "ObjectCreated:*"

// ObjectCreatedPut type of events: ObjectCreated:Put
ObjectCreatedPut EventType = "ObjectCreated:Put"

// ObjectCreatedPost type of events: ObjectCreated:Post
ObjectCreatedPost EventType = "ObjectCreated:Post"

// ObjectCreatedCopy type of events: ObjectCreated:Copy
ObjectCreatedCopy EventType = "ObjectCreated:Copy"

// ObjectCreatedCompleteMultipartUpload type of events: ObjectCreated:CompleteMultipartUpload
ObjectCreatedCompleteMultipartUpload EventType = "ObjectCreated:CompleteMultipartUpload"

// ObjectRemovedAll type of events: ObjectRemoved:*
ObjectRemovedAll EventType = "ObjectRemoved:*"

// ObjectRemovedDelete type of events: ObjectRemoved:Delete
ObjectRemovedDelete EventType = "ObjectRemoved:Delete"

// ObjectRemovedDeleteMarkerCreated type of events: ObjectRemoved:DeleteMarkerCreated
ObjectRemovedDeleteMarkerCreated EventType = "ObjectRemoved:DeleteMarkerCreated"
)

// PayerType defines type of payer
type PayerType string

const (
// BucketOwnerPayer type of payer: BucketOwner
BucketOwnerPayer PayerType = "BucketOwner"

// RequesterPayer type of payer: Requester
RequesterPayer PayerType = "Requester"

// Requester header for requester-Pays
Requester PayerType = "requester"
)

// FetchPolicyStatusType defines type of fetch policy status
type FetchPolicyStatusType string

const (
// FetchStatusOpen type of status: open
FetchStatusOpen FetchPolicyStatusType = "open"

// FetchStatusClosed type of status: closed
FetchStatusClosed FetchPolicyStatusType = "closed"
)

+ 880
- 0
modules/obs/convert.go View File

@@ -0,0 +1,880 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

//nolint:golint, unused
package obs

import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"reflect"
"strings"
"time"
)

func cleanHeaderPrefix(header http.Header) map[string][]string {
responseHeaders := make(map[string][]string)
for key, value := range header {
if len(value) > 0 {
key = strings.ToLower(key)
if strings.HasPrefix(key, HEADER_PREFIX) || strings.HasPrefix(key, HEADER_PREFIX_OBS) {
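// HEADER_PREFIX ("x-amz-") and HEADER_PREFIX_OBS ("x-obs-") are both six
// characters long, so slicing by len(HEADER_PREFIX) strips either prefix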
key = key[len(HEADER_PREFIX):]
}
responseHeaders[key] = value
}
}
return responseHeaders
}

// ParseStringToEventType converts string value to EventType value and returns it
func ParseStringToEventType(value string) (ret EventType) {
switch value {
case "ObjectCreated:*", "s3:ObjectCreated:*":
ret = ObjectCreatedAll
case "ObjectCreated:Put", "s3:ObjectCreated:Put":
ret = ObjectCreatedPut
case "ObjectCreated:Post", "s3:ObjectCreated:Post":
ret = ObjectCreatedPost
case "ObjectCreated:Copy", "s3:ObjectCreated:Copy":
ret = ObjectCreatedCopy
case "ObjectCreated:CompleteMultipartUpload", "s3:ObjectCreated:CompleteMultipartUpload":
ret = ObjectCreatedCompleteMultipartUpload
case "ObjectRemoved:*", "s3:ObjectRemoved:*":
ret = ObjectRemovedAll
case "ObjectRemoved:Delete", "s3:ObjectRemoved:Delete":
ret = ObjectRemovedDelete
case "ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRemoved:DeleteMarkerCreated":
ret = ObjectRemovedDeleteMarkerCreated
default:
ret = ""
}
return
}

// ParseStringToStorageClassType converts string value to StorageClassType value and returns it
func ParseStringToStorageClassType(value string) (ret StorageClassType) {
switch value {
case "STANDARD":
ret = StorageClassStandard
case "STANDARD_IA", "WARM":
ret = StorageClassWarm
case "GLACIER", "COLD":
ret = StorageClassCold
default:
ret = ""
}
return
}

func prepareGrantURI(grant Grant) string {
if grant.Grantee.URI == GroupAllUsers || grant.Grantee.URI == GroupAuthenticatedUsers {
return fmt.Sprintf("<URI>%s%s</URI>", "http://acs.amazonaws.com/groups/global/", grant.Grantee.URI)
}
if grant.Grantee.URI == GroupLogDelivery {
return fmt.Sprintf("<URI>%s%s</URI>", "http://acs.amazonaws.com/groups/s3/", grant.Grantee.URI)
}
return fmt.Sprintf("<URI>%s</URI>", grant.Grantee.URI)
}

func convertGrantToXML(grant Grant, isObs bool, isBucket bool) string {
xml := make([]string, 0, 4)

if grant.Grantee.Type == GranteeUser {
if isObs {
xml = append(xml, "<Grant><Grantee>")
} else {
xml = append(xml, fmt.Sprintf("<Grant><Grantee xsi:type=\"%s\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">", grant.Grantee.Type))
}
if grant.Grantee.ID != "" {
granteeID := XmlTranscoding(grant.Grantee.ID)
xml = append(xml, fmt.Sprintf("<ID>%s</ID>", granteeID))
}
if !isObs && grant.Grantee.DisplayName != "" {
granteeDisplayName := XmlTranscoding(grant.Grantee.DisplayName)
xml = append(xml, fmt.Sprintf("<DisplayName>%s</DisplayName>", granteeDisplayName))
}
xml = append(xml, "</Grantee>")
} else {
if !isObs {
xml = append(xml, fmt.Sprintf("<Grant><Grantee xsi:type=\"%s\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">", grant.Grantee.Type))
xml = append(xml, prepareGrantURI(grant))
xml = append(xml, "</Grantee>")
} else if grant.Grantee.URI == GroupAllUsers {
xml = append(xml, "<Grant><Grantee>")
xml = append(xml, fmt.Sprintf("<Canned>Everyone</Canned>"))
xml = append(xml, "</Grantee>")
} else {
return strings.Join(xml, "")
}
}

xml = append(xml, fmt.Sprintf("<Permission>%s</Permission>", grant.Permission))
if isObs && isBucket {
xml = append(xml, fmt.Sprintf("<Delivered>%t</Delivered>", grant.Delivered))
}
xml = append(xml, fmt.Sprintf("</Grant>"))
return strings.Join(xml, "")
}
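// Illustrative output for a CanonicalUser grant with READ permission
// (isObs=false, isBucket=false):
//   <Grant><Grantee xsi:type="CanonicalUser" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
//   <ID>user-id</ID></Grantee><Permission>READ</Permission></Grant>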

func hasLoggingTarget(input BucketLoggingStatus) bool {
return input.TargetBucket != "" || input.TargetPrefix != "" || len(input.TargetGrants) > 0
}

// ConvertLoggingStatusToXml converts BucketLoggingStatus value to XML data and returns it
func ConvertLoggingStatusToXml(input BucketLoggingStatus, returnMd5 bool, isObs bool) (data string, md5 string) {
grantsLength := len(input.TargetGrants)
xml := make([]string, 0, 8+grantsLength)

xml = append(xml, "<BucketLoggingStatus>")
if isObs && input.Agency != "" {
agency := XmlTranscoding(input.Agency)
xml = append(xml, fmt.Sprintf("<Agency>%s</Agency>", agency))
}
if hasLoggingTarget(input) {
xml = append(xml, "<LoggingEnabled>")
if input.TargetBucket != "" {
xml = append(xml, fmt.Sprintf("<TargetBucket>%s</TargetBucket>", input.TargetBucket))
}
if input.TargetPrefix != "" {
targetPrefix := XmlTranscoding(input.TargetPrefix)
xml = append(xml, fmt.Sprintf("<TargetPrefix>%s</TargetPrefix>", targetPrefix))
}
if grantsLength > 0 {
xml = append(xml, "<TargetGrants>")
for _, grant := range input.TargetGrants {
xml = append(xml, convertGrantToXML(grant, isObs, false))
}
xml = append(xml, "</TargetGrants>")
}

xml = append(xml, "</LoggingEnabled>")
}
xml = append(xml, "</BucketLoggingStatus>")
data = strings.Join(xml, "")
if returnMd5 {
md5 = Base64Md5([]byte(data))
}
return
}

// ConvertAclToXml converts AccessControlPolicy value to XML data and returns it
func ConvertAclToXml(input AccessControlPolicy, returnMd5 bool, isObs bool) (data string, md5 string) {
xml := make([]string, 0, 4+len(input.Grants))
ownerID := XmlTranscoding(input.Owner.ID)
xml = append(xml, fmt.Sprintf("<AccessControlPolicy><Owner><ID>%s</ID>", ownerID))
if !isObs && input.Owner.DisplayName != "" {
ownerDisplayName := XmlTranscoding(input.Owner.DisplayName)
xml = append(xml, fmt.Sprintf("<DisplayName>%s</DisplayName>", ownerDisplayName))
}
if isObs && input.Delivered != "" {
objectDelivered := XmlTranscoding(input.Delivered)
xml = append(xml, fmt.Sprintf("</Owner><Delivered>%s</Delivered><AccessControlList>", objectDelivered))
} else {
xml = append(xml, "</Owner><AccessControlList>")
}
for _, grant := range input.Grants {
xml = append(xml, convertGrantToXML(grant, isObs, false))
}
xml = append(xml, "</AccessControlList></AccessControlPolicy>")
data = strings.Join(xml, "")
if returnMd5 {
md5 = Base64Md5([]byte(data))
}
return
}

func convertBucketACLToXML(input AccessControlPolicy, returnMd5 bool, isObs bool) (data string, md5 string) {
xml := make([]string, 0, 4+len(input.Grants))
ownerID := XmlTranscoding(input.Owner.ID)
xml = append(xml, fmt.Sprintf("<AccessControlPolicy><Owner><ID>%s</ID>", ownerID))
if !isObs && input.Owner.DisplayName != "" {
ownerDisplayName := XmlTranscoding(input.Owner.DisplayName)
xml = append(xml, fmt.Sprintf("<DisplayName>%s</DisplayName>", ownerDisplayName))
}

xml = append(xml, "</Owner><AccessControlList>")

for _, grant := range input.Grants {
xml = append(xml, convertGrantToXML(grant, isObs, true))
}
xml = append(xml, "</AccessControlList></AccessControlPolicy>")
data = strings.Join(xml, "")
if returnMd5 {
md5 = Base64Md5([]byte(data))
}
return
}

func convertConditionToXML(condition Condition) string {
xml := make([]string, 0, 2)
if condition.KeyPrefixEquals != "" {
keyPrefixEquals := XmlTranscoding(condition.KeyPrefixEquals)
xml = append(xml, fmt.Sprintf("<KeyPrefixEquals>%s</KeyPrefixEquals>", keyPrefixEquals))
}
if condition.HttpErrorCodeReturnedEquals != "" {
xml = append(xml, fmt.Sprintf("<HttpErrorCodeReturnedEquals>%s</HttpErrorCodeReturnedEquals>", condition.HttpErrorCodeReturnedEquals))
}
if len(xml) > 0 {
return fmt.Sprintf("<Condition>%s</Condition>", strings.Join(xml, ""))
}
return ""
}

func prepareRoutingRule(input BucketWebsiteConfiguration) string {
xml := make([]string, 0, len(input.RoutingRules)*10)
for _, routingRule := range input.RoutingRules {
xml = append(xml, "<RoutingRule>")
xml = append(xml, "<Redirect>")
if routingRule.Redirect.Protocol != "" {
xml = append(xml, fmt.Sprintf("<Protocol>%s</Protocol>", routingRule.Redirect.Protocol))
}
if routingRule.Redirect.HostName != "" {
xml = append(xml, fmt.Sprintf("<HostName>%s</HostName>", routingRule.Redirect.HostName))
}
if routingRule.Redirect.ReplaceKeyPrefixWith != "" {
replaceKeyPrefixWith := XmlTranscoding(routingRule.Redirect.ReplaceKeyPrefixWith)
xml = append(xml, fmt.Sprintf("<ReplaceKeyPrefixWith>%s</ReplaceKeyPrefixWith>", replaceKeyPrefixWith))
}

if routingRule.Redirect.ReplaceKeyWith != "" {
replaceKeyWith := XmlTranscoding(routingRule.Redirect.ReplaceKeyWith)
xml = append(xml, fmt.Sprintf("<ReplaceKeyWith>%s</ReplaceKeyWith>", replaceKeyWith))
}
if routingRule.Redirect.HttpRedirectCode != "" {
xml = append(xml, fmt.Sprintf("<HttpRedirectCode>%s</HttpRedirectCode>", routingRule.Redirect.HttpRedirectCode))
}
xml = append(xml, "</Redirect>")

if ret := convertConditionToXML(routingRule.Condition); ret != "" {
xml = append(xml, ret)
}
xml = append(xml, "</RoutingRule>")
}
return strings.Join(xml, "")
}

// ConvertWebsiteConfigurationToXml converts BucketWebsiteConfiguration value to XML data and returns it
func ConvertWebsiteConfigurationToXml(input BucketWebsiteConfiguration, returnMd5 bool) (data string, md5 string) {
routingRuleLength := len(input.RoutingRules)
xml := make([]string, 0, 6+routingRuleLength*10)
xml = append(xml, "<WebsiteConfiguration>")

if input.RedirectAllRequestsTo.HostName != "" {
xml = append(xml, fmt.Sprintf("<RedirectAllRequestsTo><HostName>%s</HostName>", input.RedirectAllRequestsTo.HostName))
if input.RedirectAllRequestsTo.Protocol != "" {
xml = append(xml, fmt.Sprintf("<Protocol>%s</Protocol>", input.RedirectAllRequestsTo.Protocol))
}
xml = append(xml, "</RedirectAllRequestsTo>")
} else {
if input.IndexDocument.Suffix != "" {
indexDocumentSuffix := XmlTranscoding(input.IndexDocument.Suffix)
xml = append(xml, fmt.Sprintf("<IndexDocument><Suffix>%s</Suffix></IndexDocument>", indexDocumentSuffix))
}
if input.ErrorDocument.Key != "" {
errorDocumentKey := XmlTranscoding(input.ErrorDocument.Key)
xml = append(xml, fmt.Sprintf("<ErrorDocument><Key>%s</Key></ErrorDocument>", errorDocumentKey))
}
if routingRuleLength > 0 {
xml = append(xml, "<RoutingRules>")
xml = append(xml, prepareRoutingRule(input))
xml = append(xml, "</RoutingRules>")
}
}

xml = append(xml, "</WebsiteConfiguration>")
data = strings.Join(xml, "")
if returnMd5 {
md5 = Base64Md5([]byte(data))
}
return
}

func convertTransitionsToXML(transitions []Transition, isObs bool) string {
if length := len(transitions); length > 0 {
xml := make([]string, 0, length)
for _, transition := range transitions {
var temp string
if transition.Days > 0 {
temp = fmt.Sprintf("<Days>%d</Days>", transition.Days)
} else if !transition.Date.IsZero() {
temp = fmt.Sprintf("<Date>%s</Date>", transition.Date.UTC().Format(ISO8601_MIDNIGHT_DATE_FORMAT))
}
if temp != "" {
if !isObs {
storageClass := string(transition.StorageClass)
if transition.StorageClass == StorageClassWarm {
storageClass = string(storageClassStandardIA)
} else if transition.StorageClass == StorageClassCold {
storageClass = string(storageClassGlacier)
}
xml = append(xml, fmt.Sprintf("<Transition>%s<StorageClass>%s</StorageClass></Transition>", temp, storageClass))
} else {
xml = append(xml, fmt.Sprintf("<Transition>%s<StorageClass>%s</StorageClass></Transition>", temp, transition.StorageClass))
}
}
}
return strings.Join(xml, "")
}
return ""
}

func convertExpirationToXML(expiration Expiration) string {
if expiration.Days > 0 {
return fmt.Sprintf("<Expiration><Days>%d</Days></Expiration>", expiration.Days)
} else if !expiration.Date.IsZero() {
return fmt.Sprintf("<Expiration><Date>%s</Date></Expiration>", expiration.Date.UTC().Format(ISO8601_MIDNIGHT_DATE_FORMAT))
}
return ""
}
func convertNoncurrentVersionTransitionsToXML(noncurrentVersionTransitions []NoncurrentVersionTransition, isObs bool) string {
if length := len(noncurrentVersionTransitions); length > 0 {
xml := make([]string, 0, length)
for _, noncurrentVersionTransition := range noncurrentVersionTransitions {
if noncurrentVersionTransition.NoncurrentDays > 0 {
storageClass := string(noncurrentVersionTransition.StorageClass)
if !isObs {
if storageClass == string(StorageClassWarm) {
storageClass = string(storageClassStandardIA)
} else if storageClass == string(StorageClassCold) {
storageClass = string(storageClassGlacier)
}
}
xml = append(xml, fmt.Sprintf("<NoncurrentVersionTransition><NoncurrentDays>%d</NoncurrentDays>"+
"<StorageClass>%s</StorageClass></NoncurrentVersionTransition>",
noncurrentVersionTransition.NoncurrentDays, storageClass))
}
}
return strings.Join(xml, "")
}
return ""
}
func convertNoncurrentVersionExpirationToXML(noncurrentVersionExpiration NoncurrentVersionExpiration) string {
if noncurrentVersionExpiration.NoncurrentDays > 0 {
return fmt.Sprintf("<NoncurrentVersionExpiration><NoncurrentDays>%d</NoncurrentDays></NoncurrentVersionExpiration>", noncurrentVersionExpiration.NoncurrentDays)
}
return ""
}

// ConvertLifecyleConfigurationToXml converts BucketLifecyleConfiguration value to XML data and returns it
func ConvertLifecyleConfigurationToXml(input BucketLifecyleConfiguration, returnMd5 bool, isObs bool) (data string, md5 string) {
xml := make([]string, 0, 2+len(input.LifecycleRules)*9)
xml = append(xml, "<LifecycleConfiguration>")
for _, lifecyleRule := range input.LifecycleRules {
xml = append(xml, "<Rule>")
if lifecyleRule.ID != "" {
lifecyleRuleID := XmlTranscoding(lifecyleRule.ID)
xml = append(xml, fmt.Sprintf("<ID>%s</ID>", lifecyleRuleID))
}
lifecyleRulePrefix := XmlTranscoding(lifecyleRule.Prefix)
xml = append(xml, fmt.Sprintf("<Prefix>%s</Prefix>", lifecyleRulePrefix))
xml = append(xml, fmt.Sprintf("<Status>%s</Status>", lifecyleRule.Status))
if ret := convertTransitionsToXML(lifecyleRule.Transitions, isObs); ret != "" {
xml = append(xml, ret)
}
if ret := convertExpirationToXML(lifecyleRule.Expiration); ret != "" {
xml = append(xml, ret)
}
if ret := convertNoncurrentVersionTransitionsToXML(lifecyleRule.NoncurrentVersionTransitions, isObs); ret != "" {
xml = append(xml, ret)
}
if ret := convertNoncurrentVersionExpirationToXML(lifecyleRule.NoncurrentVersionExpiration); ret != "" {
xml = append(xml, ret)
}
xml = append(xml, "</Rule>")
}
xml = append(xml, "</LifecycleConfiguration>")
data = strings.Join(xml, "")
if returnMd5 {
md5 = Base64Md5([]byte(data))
}
return
}
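// Illustrative output for a single rule {ID: "r1", Prefix: "logs/",
// Status: RuleStatusEnabled, Expiration: {Days: 30}}:
//   <LifecycleConfiguration><Rule><ID>r1</ID><Prefix>logs/</Prefix>
//   <Status>Enabled</Status><Expiration><Days>30</Days></Expiration></Rule>
//   </LifecycleConfiguration>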

func converntFilterRulesToXML(filterRules []FilterRule, isObs bool) string {
if length := len(filterRules); length > 0 {
xml := make([]string, 0, length*4)
for _, filterRule := range filterRules {
xml = append(xml, "<FilterRule>")
if filterRule.Name != "" {
filterRuleName := XmlTranscoding(filterRule.Name)
xml = append(xml, fmt.Sprintf("<Name>%s</Name>", filterRuleName))
}
if filterRule.Value != "" {
filterRuleValue := XmlTranscoding(filterRule.Value)
xml = append(xml, fmt.Sprintf("<Value>%s</Value>", filterRuleValue))
}
xml = append(xml, "</FilterRule>")
}
if !isObs {
return fmt.Sprintf("<Filter><S3Key>%s</S3Key></Filter>", strings.Join(xml, ""))
}
return fmt.Sprintf("<Filter><Object>%s</Object></Filter>", strings.Join(xml, ""))
}
return ""
}

func converntEventsToXML(events []EventType, isObs bool) string {
if length := len(events); length > 0 {
xml := make([]string, 0, length)
if !isObs {
for _, event := range events {
xml = append(xml, fmt.Sprintf("<Event>%s%s</Event>", "s3:", event))
}
} else {
for _, event := range events {
xml = append(xml, fmt.Sprintf("<Event>%s</Event>", event))
}
}
return strings.Join(xml, "")
}
return ""
}

func converntConfigureToXML(topicConfiguration TopicConfiguration, xmlElem string, isObs bool) string {
xml := make([]string, 0, 6)
xml = append(xml, xmlElem)
if topicConfiguration.ID != "" {
topicConfigurationID := XmlTranscoding(topicConfiguration.ID)
xml = append(xml, fmt.Sprintf("<Id>%s</Id>", topicConfigurationID))
}
topicConfigurationTopic := XmlTranscoding(topicConfiguration.Topic)
xml = append(xml, fmt.Sprintf("<Topic>%s</Topic>", topicConfigurationTopic))

if ret := converntEventsToXML(topicConfiguration.Events, isObs); ret != "" {
xml = append(xml, ret)
}
if ret := converntFilterRulesToXML(topicConfiguration.FilterRules, isObs); ret != "" {
xml = append(xml, ret)
}
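// derive the closing tag from the opening one by inserting "/" after "<",
// e.g. "<TopicConfiguration>" becomes "</TopicConfiguration>"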
tempElem := xmlElem[0:1] + "/" + xmlElem[1:]
xml = append(xml, tempElem)
return strings.Join(xml, "")
}

// ConverntObsRestoreToXml converts RestoreObjectInput value to XML data and returns it
func ConverntObsRestoreToXml(restoreObjectInput RestoreObjectInput) string {
xml := make([]string, 0, 2)
xml = append(xml, fmt.Sprintf("<RestoreRequest><Days>%d</Days>", restoreObjectInput.Days))
if restoreObjectInput.Tier != "Bulk" {
xml = append(xml, fmt.Sprintf("<RestoreJob><Tier>%s</Tier></RestoreJob>", restoreObjectInput.Tier))
}
xml = append(xml, fmt.Sprintf("</RestoreRequest>"))
data := strings.Join(xml, "")
return data
}

// ConvertNotificationToXml converts BucketNotification value to XML data and returns it
func ConvertNotificationToXml(input BucketNotification, returnMd5 bool, isObs bool) (data string, md5 string) {
xml := make([]string, 0, 2+len(input.TopicConfigurations)*6)
xml = append(xml, "<NotificationConfiguration>")
for _, topicConfiguration := range input.TopicConfigurations {
ret := converntConfigureToXML(topicConfiguration, "<TopicConfiguration>", isObs)
xml = append(xml, ret)
}
xml = append(xml, "</NotificationConfiguration>")
data = strings.Join(xml, "")
if returnMd5 {
md5 = Base64Md5([]byte(data))
}
return
}

// ConvertCompleteMultipartUploadInputToXml converts CompleteMultipartUploadInput value to XML data and returns it
func ConvertCompleteMultipartUploadInputToXml(input CompleteMultipartUploadInput, returnMd5 bool) (data string, md5 string) {
xml := make([]string, 0, 2+len(input.Parts)*4)
xml = append(xml, "<CompleteMultipartUpload>")
for _, part := range input.Parts {
xml = append(xml, "<Part>")
xml = append(xml, fmt.Sprintf("<PartNumber>%d</PartNumber>", part.PartNumber))
xml = append(xml, fmt.Sprintf("<ETag>%s</ETag>", part.ETag))
xml = append(xml, "</Part>")
}
xml = append(xml, "</CompleteMultipartUpload>")
data = strings.Join(xml, "")
if returnMd5 {
md5 = Base64Md5([]byte(data))
}
return
}

func parseSseHeader(responseHeaders map[string][]string) (sseHeader ISseHeader) {
if ret, ok := responseHeaders[HEADER_SSEC_ENCRYPTION]; ok {
sseCHeader := SseCHeader{Encryption: ret[0]}
if ret, ok = responseHeaders[HEADER_SSEC_KEY_MD5]; ok {
sseCHeader.KeyMD5 = ret[0]
}
sseHeader = sseCHeader
} else if ret, ok := responseHeaders[HEADER_SSEKMS_ENCRYPTION]; ok {
sseKmsHeader := SseKmsHeader{Encryption: ret[0]}
if ret, ok = responseHeaders[HEADER_SSEKMS_KEY]; ok {
sseKmsHeader.Key = ret[0]
} else if ret, ok = responseHeaders[HEADER_SSEKMS_ENCRYPT_KEY_OBS]; ok {
sseKmsHeader.Key = ret[0]
}
sseHeader = sseKmsHeader
}
return
}

func parseCorsHeader(output BaseModel) (AllowOrigin, AllowHeader, AllowMethod, ExposeHeader string, MaxAgeSeconds int) {
if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_ALLOW_ORIGIN]; ok {
AllowOrigin = ret[0]
}
if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_ALLOW_HEADERS]; ok {
AllowHeader = ret[0]
}
if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_MAX_AGE]; ok {
MaxAgeSeconds = StringToInt(ret[0], 0)
}
if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_ALLOW_METHODS]; ok {
AllowMethod = ret[0]
}
if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_EXPOSE_HEADERS]; ok {
ExposeHeader = ret[0]
}
return
}

func parseUnCommonHeader(output *GetObjectMetadataOutput) {
if ret, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok {
output.VersionId = ret[0]
}
if ret, ok := output.ResponseHeaders[HEADER_WEBSITE_REDIRECT_LOCATION]; ok {
output.WebsiteRedirectLocation = ret[0]
}
if ret, ok := output.ResponseHeaders[HEADER_EXPIRATION]; ok {
output.Expiration = ret[0]
}
if ret, ok := output.ResponseHeaders[HEADER_RESTORE]; ok {
output.Restore = ret[0]
}
if ret, ok := output.ResponseHeaders[HEADER_OBJECT_TYPE]; ok {
output.ObjectType = ret[0]
}
if ret, ok := output.ResponseHeaders[HEADER_NEXT_APPEND_POSITION]; ok {
output.NextAppendPosition = ret[0]
}
}

// ParseGetObjectMetadataOutput sets GetObjectMetadataOutput field values with response headers
func ParseGetObjectMetadataOutput(output *GetObjectMetadataOutput) {
output.AllowOrigin, output.AllowHeader, output.AllowMethod, output.ExposeHeader, output.MaxAgeSeconds = parseCorsHeader(output.BaseModel)
parseUnCommonHeader(output)
if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS2]; ok {
output.StorageClass = ParseStringToStorageClassType(ret[0])
}
if ret, ok := output.ResponseHeaders[HEADER_ETAG]; ok {
output.ETag = ret[0]
}
if ret, ok := output.ResponseHeaders[HEADER_CONTENT_TYPE]; ok {
output.ContentType = ret[0]
}

output.SseHeader = parseSseHeader(output.ResponseHeaders)
if ret, ok := output.ResponseHeaders[HEADER_LASTMODIFIED]; ok {
// parse into a separately named value to avoid shadowing ret
if lastModified, err := time.Parse(time.RFC1123, ret[0]); err == nil {
output.LastModified = lastModified
}
}
if ret, ok := output.ResponseHeaders[HEADER_CONTENT_LENGTH]; ok {
output.ContentLength = StringToInt64(ret[0], 0)
}

output.Metadata = make(map[string]string)

for key, value := range output.ResponseHeaders {
if strings.HasPrefix(key, PREFIX_META) {
_key := key[len(PREFIX_META):]
output.ResponseHeaders[_key] = value
output.Metadata[_key] = value[0]
delete(output.ResponseHeaders, key)
}
}

}
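// Example of the metadata pass above: a response header that arrived as
// "x-amz-meta-color: red" has already been trimmed to "meta-color" by
// cleanHeaderPrefix, so here it is re-keyed to "color" and exposed as
// Metadata["color"] = "red".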

// ParseCopyObjectOutput sets CopyObjectOutput field values with response headers
func ParseCopyObjectOutput(output *CopyObjectOutput) {
if ret, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok {
output.VersionId = ret[0]
}
output.SseHeader = parseSseHeader(output.ResponseHeaders)
if ret, ok := output.ResponseHeaders[HEADER_COPY_SOURCE_VERSION_ID]; ok {
output.CopySourceVersionId = ret[0]
}
}

// ParsePutObjectOutput sets PutObjectOutput field values with response headers
func ParsePutObjectOutput(output *PutObjectOutput) {
if ret, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok {
output.VersionId = ret[0]
}
output.SseHeader = parseSseHeader(output.ResponseHeaders)
if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS2]; ok {
output.StorageClass = ParseStringToStorageClassType(ret[0])
}
if ret, ok := output.ResponseHeaders[HEADER_ETAG]; ok {
output.ETag = ret[0]
}
}

// ParseInitiateMultipartUploadOutput sets InitiateMultipartUploadOutput field values with response headers
func ParseInitiateMultipartUploadOutput(output *InitiateMultipartUploadOutput) {
output.SseHeader = parseSseHeader(output.ResponseHeaders)
}

// ParseUploadPartOutput sets UploadPartOutput field values with response headers
func ParseUploadPartOutput(output *UploadPartOutput) {
output.SseHeader = parseSseHeader(output.ResponseHeaders)
if ret, ok := output.ResponseHeaders[HEADER_ETAG]; ok {
output.ETag = ret[0]
}
}

// ParseCompleteMultipartUploadOutput sets CompleteMultipartUploadOutput field values with response headers
func ParseCompleteMultipartUploadOutput(output *CompleteMultipartUploadOutput) {
output.SseHeader = parseSseHeader(output.ResponseHeaders)
if ret, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok {
output.VersionId = ret[0]
}
}

// ParseCopyPartOutput sets CopyPartOutput field values with response headers
func ParseCopyPartOutput(output *CopyPartOutput) {
output.SseHeader = parseSseHeader(output.ResponseHeaders)
}

// ParseGetBucketMetadataOutput sets GetBucketMetadataOutput field values with response headers
func ParseGetBucketMetadataOutput(output *GetBucketMetadataOutput) {
output.AllowOrigin, output.AllowHeader, output.AllowMethod, output.ExposeHeader, output.MaxAgeSeconds = parseCorsHeader(output.BaseModel)
if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS]; ok {
output.StorageClass = ParseStringToStorageClassType(ret[0])
} else if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS2]; ok {
output.StorageClass = ParseStringToStorageClassType(ret[0])
}
if ret, ok := output.ResponseHeaders[HEADER_VERSION_OBS]; ok {
output.Version = ret[0]
}
if ret, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok {
output.Location = ret[0]
} else if ret, ok := output.ResponseHeaders[HEADER_BUCKET_LOCATION_OBS]; ok {
output.Location = ret[0]
}
if ret, ok := output.ResponseHeaders[HEADER_EPID_HEADERS]; ok {
output.Epid = ret[0]
}
}

func parseContentHeader(output *SetObjectMetadataOutput) {
if ret, ok := output.ResponseHeaders[HEADER_CONTENT_DISPOSITION]; ok {
output.ContentDisposition = ret[0]
}
if ret, ok := output.ResponseHeaders[HEADER_CONTENT_ENCODING]; ok {
output.ContentEncoding = ret[0]
}
if ret, ok := output.ResponseHeaders[HEADER_CONTENT_LANGUAGE]; ok {
output.ContentLanguage = ret[0]
}
if ret, ok := output.ResponseHeaders[HEADER_CONTENT_TYPE]; ok {
output.ContentType = ret[0]
}
}

// ParseSetObjectMetadataOutput sets SetObjectMetadataOutput field values with response headers
func ParseSetObjectMetadataOutput(output *SetObjectMetadataOutput) {
if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS]; ok {
output.StorageClass = ParseStringToStorageClassType(ret[0])
} else if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS2]; ok {
output.StorageClass = ParseStringToStorageClassType(ret[0])
}
if ret, ok := output.ResponseHeaders[HEADER_METADATA_DIRECTIVE]; ok {
output.MetadataDirective = MetadataDirectiveType(ret[0])
}
if ret, ok := output.ResponseHeaders[HEADER_CACHE_CONTROL]; ok {
output.CacheControl = ret[0]
}
parseContentHeader(output)
if ret, ok := output.ResponseHeaders[HEADER_EXPIRES]; ok {
output.Expires = ret[0]
}
if ret, ok := output.ResponseHeaders[HEADER_WEBSITE_REDIRECT_LOCATION]; ok {
output.WebsiteRedirectLocation = ret[0]
}
output.Metadata = make(map[string]string)

for key, value := range output.ResponseHeaders {
if strings.HasPrefix(key, PREFIX_META) {
_key := key[len(PREFIX_META):]
output.ResponseHeaders[_key] = value
output.Metadata[_key] = value[0]
delete(output.ResponseHeaders, key)
}
}
}

// ParseDeleteObjectOutput sets DeleteObjectOutput field values with response headers
func ParseDeleteObjectOutput(output *DeleteObjectOutput) {
if versionID, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok {
output.VersionId = versionID[0]
}

if deleteMarker, ok := output.ResponseHeaders[HEADER_DELETE_MARKER]; ok {
output.DeleteMarker = deleteMarker[0] == "true"
}
}

// ParseGetObjectOutput sets GetObjectOutput field values with response headers
func ParseGetObjectOutput(output *GetObjectOutput) {
ParseGetObjectMetadataOutput(&output.GetObjectMetadataOutput)
if ret, ok := output.ResponseHeaders[HEADER_DELETE_MARKER]; ok {
output.DeleteMarker = ret[0] == "true"
}
if ret, ok := output.ResponseHeaders[HEADER_CACHE_CONTROL]; ok {
output.CacheControl = ret[0]
}
if ret, ok := output.ResponseHeaders[HEADER_CONTENT_DISPOSITION]; ok {
output.ContentDisposition = ret[0]
}
if ret, ok := output.ResponseHeaders[HEADER_CONTENT_ENCODING]; ok {
output.ContentEncoding = ret[0]
}
if ret, ok := output.ResponseHeaders[HEADER_CONTENT_LANGUAGE]; ok {
output.ContentLanguage = ret[0]
}
if ret, ok := output.ResponseHeaders[HEADER_EXPIRES]; ok {
output.Expires = ret[0]
}
}

// ConvertRequestToIoReaderV2 converts req to XML data
func ConvertRequestToIoReaderV2(req interface{}) (io.Reader, string, error) {
data, err := TransToXml(req)
if err == nil {
if isDebugLogEnabled() {
doLog(LEVEL_DEBUG, "Do http request with data: %s", string(data))
}
return bytes.NewReader(data), Base64Md5(data), nil
}
return nil, "", err
}

// ConvertRequestToIoReader converts req to XML data
func ConvertRequestToIoReader(req interface{}) (io.Reader, error) {
body, err := TransToXml(req)
if err == nil {
if isDebugLogEnabled() {
doLog(LEVEL_DEBUG, "Do http request with data: %s", string(body))
}
return bytes.NewReader(body), nil
}
return nil, err
}

// ParseResponseToBaseModel parses an HTTP response from OBS into the given baseModel
func ParseResponseToBaseModel(resp *http.Response, baseModel IBaseModel, xmlResult bool, isObs bool) (err error) {
readCloser, ok := baseModel.(IReadCloser)
if !ok {
defer func() {
errMsg := resp.Body.Close()
if errMsg != nil {
doLog(LEVEL_WARN, "Failed to close response body")
}
}()
var body []byte
// Assign to the named return so read/unmarshal failures propagate to the caller
// instead of being silently shadowed by a local err.
body, err = ioutil.ReadAll(resp.Body)
if err == nil && len(body) > 0 {
if xmlResult {
err = ParseXml(body, baseModel)
} else {
s := reflect.TypeOf(baseModel).Elem()
if s.Name() == "GetBucketPolicyOutput" {
for i := 0; i < s.NumField(); i++ {
if s.Field(i).Tag == "json:\"body\"" {
reflect.ValueOf(baseModel).Elem().FieldByName(s.Field(i).Name).SetString(string(body))
break
}
}
} else {
err = parseJSON(body, baseModel)
}
}
if err != nil {
doLog(LEVEL_ERROR, "Unmarshal error: %v", err)
}
}
} else {
readCloser.setReadCloser(resp.Body)
}

baseModel.setStatusCode(resp.StatusCode)
responseHeaders := cleanHeaderPrefix(resp.Header)
baseModel.setResponseHeaders(responseHeaders)
if values, ok := responseHeaders[HEADER_REQUEST_ID]; ok {
baseModel.setRequestID(values[0])
}
return
}

// ParseResponseToObsError parses an error response from OBS into an ObsError value
func ParseResponseToObsError(resp *http.Response, isObs bool) error {
isJson := false
if contentType, ok := resp.Header[HEADER_CONTENT_TYPE_CAML]; ok {
jsonType := mimeTypes["json"]
isJson = contentType[0] == jsonType
}
obsError := ObsError{}
respError := ParseResponseToBaseModel(resp, &obsError, !isJson, isObs)
if respError != nil {
doLog(LEVEL_WARN, "Parse response to BaseModel with error: %v", respError)
}
obsError.Status = resp.Status
return obsError
}

// convertFetchPolicyToJSON converts SetBucketFetchPolicyInput into json format
func convertFetchPolicyToJSON(input SetBucketFetchPolicyInput) (data string, err error) {
fetch := map[string]SetBucketFetchPolicyInput{"fetch": input}
jsonBytes, err := json.Marshal(fetch)
if err != nil {
return "", err
}
data = string(jsonBytes)
return
}

// convertFetchJobToJSON converts SetBucketFetchJobInput into json format
func convertFetchJobToJSON(input SetBucketFetchJobInput) (data string, err error) {
objectHeaders := make(map[string]string)
for key, value := range input.ObjectHeaders {
if value != "" {
_key := strings.ToLower(key)
// Check the lower-cased key so mixed-case x-obs-* headers are not double-prefixed.
if !strings.HasPrefix(_key, HEADER_PREFIX_OBS) {
_key = HEADER_PREFIX_META_OBS + _key
}
objectHeaders[_key] = value
}
}
input.ObjectHeaders = objectHeaders
jsonBytes, err := json.Marshal(input)
if err != nil {
return "", err
}
data = string(jsonBytes)
return
}
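
// A minimal in-package sketch of the prefixing above. Only the ObjectHeaders field of
// SetBucketFetchJobInput is exercised; the concrete prefix literals (e.g. "x-obs-meta-")
// are assumptions taken from const.go.
func exampleFetchJobHeaders() {
input := SetBucketFetchJobInput{}
input.ObjectHeaders = map[string]string{
"color":     "red",     // gains the metadata prefix, e.g. "x-obs-meta-color"
"x-obs-acl": "private", // already x-obs-prefixed, kept as-is
}
if payload, err := convertFetchJobToJSON(input); err == nil {
doLog(LEVEL_DEBUG, "fetch job payload: %s", payload)
}
}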

+ 35
- 0
modules/obs/error.go View File

@@ -0,0 +1,35 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

//nolint:golint, unused
package obs

import (
"encoding/xml"
"fmt"
)

// ObsError defines error response from OBS
type ObsError struct {
BaseModel
Status string
XMLName xml.Name `xml:"Error"`
Code string `xml:"Code" json:"code"`
Message string `xml:"Message" json:"message"`
Resource string `xml:"Resource"`
HostId string `xml:"HostId"`
}

func (err ObsError) Error() string {
return fmt.Sprintf("obs: service returned error: Status=%s, Code=%s, Message=%s, RequestId=%s",
err.Status, err.Code, err.Message, err.RequestId)
}
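
// A minimal usage sketch: client calls surface failures as plain error values, so a
// caller can recover the OBS-specific detail with a type assertion. The surrounding
// call site is assumed; the fields are populated by ParseResponseToObsError in convert.go.
func describeObsFailure(err error) {
if err == nil {
return
}
if obsErr, ok := err.(ObsError); ok {
fmt.Printf("obs request failed: status=%s code=%s message=%s requestId=%s\n",
obsErr.Status, obsErr.Code, obsErr.Message, obsErr.RequestId)
return
}
fmt.Printf("transport-level failure: %v\n", err)
}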

+ 37
- 0
modules/obs/extension.go View File

@@ -0,0 +1,37 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

//nolint:golint, unused
package obs

import (
"fmt"
"strings"
)

type extensionOptions interface{}
type extensionHeaders func(headers map[string][]string, isObs bool) error

func setHeaderPrefix(key string, value string) extensionHeaders {
return func(headers map[string][]string, isObs bool) error {
if strings.TrimSpace(value) == "" {
return fmt.Errorf("set header %s with empty value", key)
}
setHeaders(headers, key, []string{value}, isObs)
return nil
}
}

// WithReqPaymentHeader sets header for requester-pays
func WithReqPaymentHeader(requester PayerType) extensionHeaders {
return setHeaderPrefix(REQUEST_PAYER, string(requester))
}
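
// A minimal in-package sketch: the closure returned above is normally applied to the
// outgoing header map by doAction in http.go. The "requester" payer value is an
// assumption for illustration.
func exampleRequesterPays() {
headers := make(map[string][]string)
ext := WithReqPaymentHeader(PayerType("requester"))
if err := ext(headers, true); err != nil {
doLog(LEVEL_WARN, "failed to apply requester-pays header: %v", err)
}
}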

+ 566
- 0
modules/obs/http.go View File

@@ -0,0 +1,566 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"bytes"
"errors"
"fmt"
"io"
"math/rand"
"net"
"net/http"
"net/url"
"os"
"strings"
"time"
)

func prepareHeaders(headers map[string][]string, meta bool, isObs bool) map[string][]string {
_headers := make(map[string][]string, len(headers))
if headers != nil {
for key, value := range headers {
key = strings.TrimSpace(key)
if key == "" {
continue
}
_key := strings.ToLower(key)
// Compare prefixes against the lower-cased key so mixed-case x-amz-*/x-obs-* headers are recognized.
if _, ok := allowedRequestHTTPHeaderMetadataNames[_key]; !ok && !strings.HasPrefix(_key, HEADER_PREFIX) && !strings.HasPrefix(_key, HEADER_PREFIX_OBS) {
if !meta {
continue
}
if !isObs {
_key = HEADER_PREFIX_META + _key
} else {
_key = HEADER_PREFIX_META_OBS + _key
}
} else {
_key = key
}
_headers[_key] = value
}
}
return _headers
}
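
// A sketch of the effect for a PUT (meta=true) with the AWS-style signature. The prefix
// literal "x-amz-meta-" and the content-type allow-list entry are assumptions from const.go.
func examplePrepareHeaders() {
in := map[string][]string{
"color":        {"red"},        // custom key: becomes "x-amz-meta-color"
"content-type": {"text/plain"}, // assumed allow-listed, passed through unchanged
}
out := prepareHeaders(in, true, false)
doLog(LEVEL_DEBUG, "prepared headers: %v", out)
}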

func (obsClient ObsClient) doActionWithoutBucket(action, method string, input ISerializable, output IBaseModel, extensions []extensionOptions) error {
return obsClient.doAction(action, method, "", "", input, output, true, true, extensions)
}

func (obsClient ObsClient) doActionWithBucketV2(action, method, bucketName string, input ISerializable, output IBaseModel, extensions []extensionOptions) error {
if strings.TrimSpace(bucketName) == "" && !obsClient.conf.cname {
return errors.New("Bucket is empty")
}
return obsClient.doAction(action, method, bucketName, "", input, output, false, true, extensions)
}

func (obsClient ObsClient) doActionWithBucket(action, method, bucketName string, input ISerializable, output IBaseModel, extensions []extensionOptions) error {
if strings.TrimSpace(bucketName) == "" && !obsClient.conf.cname {
return errors.New("Bucket is empty")
}
return obsClient.doAction(action, method, bucketName, "", input, output, true, true, extensions)
}

func (obsClient ObsClient) doActionWithBucketAndKey(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, extensions []extensionOptions) error {
return obsClient._doActionWithBucketAndKey(action, method, bucketName, objectKey, input, output, true, extensions)
}

func (obsClient ObsClient) doActionWithBucketAndKeyV2(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, extensions []extensionOptions) error {
if strings.TrimSpace(bucketName) == "" && !obsClient.conf.cname {
return errors.New("Bucket is empty")
}
if strings.TrimSpace(objectKey) == "" {
return errors.New("Key is empty")
}
return obsClient.doAction(action, method, bucketName, objectKey, input, output, false, true, extensions)
}

func (obsClient ObsClient) doActionWithBucketAndKeyUnRepeatable(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, extensions []extensionOptions) error {
return obsClient._doActionWithBucketAndKey(action, method, bucketName, objectKey, input, output, false, extensions)
}

func (obsClient ObsClient) _doActionWithBucketAndKey(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, repeatable bool, extensions []extensionOptions) error {
if strings.TrimSpace(bucketName) == "" && !obsClient.conf.cname {
return errors.New("Bucket is empty")
}
if strings.TrimSpace(objectKey) == "" {
return errors.New("Key is empty")
}
return obsClient.doAction(action, method, bucketName, objectKey, input, output, true, repeatable, extensions)
}

func (obsClient ObsClient) doAction(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, xmlResult bool, repeatable bool, extensions []extensionOptions) error {

var resp *http.Response
var respError error
doLog(LEVEL_INFO, "Enter method %s...", action)
start := GetCurrentTimestamp()

params, headers, data, err := input.trans(obsClient.conf.signature == SignatureObs)
if err != nil {
return err
}

if params == nil {
params = make(map[string]string)
}

if headers == nil {
headers = make(map[string][]string)
}

for _, extension := range extensions {
if extensionHeader, ok := extension.(extensionHeaders); ok {
_err := extensionHeader(headers, obsClient.conf.signature == SignatureObs)
if _err != nil {
doLog(LEVEL_WARN, fmt.Sprintf("set header with error: %v", _err))
}
} else {
doLog(LEVEL_WARN, "Unsupported extensionOptions")
}
}

switch method {
case HTTP_GET:
resp, respError = obsClient.doHTTPGet(bucketName, objectKey, params, headers, data, repeatable)
case HTTP_POST:
resp, respError = obsClient.doHTTPPost(bucketName, objectKey, params, headers, data, repeatable)
case HTTP_PUT:
resp, respError = obsClient.doHTTPPut(bucketName, objectKey, params, headers, data, repeatable)
case HTTP_DELETE:
resp, respError = obsClient.doHTTPDelete(bucketName, objectKey, params, headers, data, repeatable)
case HTTP_HEAD:
resp, respError = obsClient.doHTTPHead(bucketName, objectKey, params, headers, data, repeatable)
case HTTP_OPTIONS:
resp, respError = obsClient.doHTTPOptions(bucketName, objectKey, params, headers, data, repeatable)
default:
respError = errors.New("Unexpect http method error")
}
if respError == nil && output != nil {
respError = ParseResponseToBaseModel(resp, output, xmlResult, obsClient.conf.signature == SignatureObs)
if respError != nil {
doLog(LEVEL_WARN, "Parse response to BaseModel with error: %v", respError)
}
} else if respError != nil {
doLog(LEVEL_WARN, "Do http request with error: %v", respError)
}

if isDebugLogEnabled() {
doLog(LEVEL_DEBUG, "End method %s, obsclient cost %d ms", action, (GetCurrentTimestamp() - start))
}

return respError
}

func (obsClient ObsClient) doHTTPGet(bucketName, objectKey string, params map[string]string,
headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
return obsClient.doHTTP(HTTP_GET, bucketName, objectKey, params, prepareHeaders(headers, false, obsClient.conf.signature == SignatureObs), data, repeatable)
}

func (obsClient ObsClient) doHTTPHead(bucketName, objectKey string, params map[string]string,
headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
return obsClient.doHTTP(HTTP_HEAD, bucketName, objectKey, params, prepareHeaders(headers, false, obsClient.conf.signature == SignatureObs), data, repeatable)
}

func (obsClient ObsClient) doHTTPOptions(bucketName, objectKey string, params map[string]string,
headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
return obsClient.doHTTP(HTTP_OPTIONS, bucketName, objectKey, params, prepareHeaders(headers, false, obsClient.conf.signature == SignatureObs), data, repeatable)
}

func (obsClient ObsClient) doHTTPDelete(bucketName, objectKey string, params map[string]string,
headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
return obsClient.doHTTP(HTTP_DELETE, bucketName, objectKey, params, prepareHeaders(headers, false, obsClient.conf.signature == SignatureObs), data, repeatable)
}

func (obsClient ObsClient) doHTTPPut(bucketName, objectKey string, params map[string]string,
headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
return obsClient.doHTTP(HTTP_PUT, bucketName, objectKey, params, prepareHeaders(headers, true, obsClient.conf.signature == SignatureObs), data, repeatable)
}

func (obsClient ObsClient) doHTTPPost(bucketName, objectKey string, params map[string]string,
headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
return obsClient.doHTTP(HTTP_POST, bucketName, objectKey, params, prepareHeaders(headers, true, obsClient.conf.signature == SignatureObs), data, repeatable)
}

func (obsClient ObsClient) doHTTPWithSignedURL(action, method string, signedURL string, actualSignedRequestHeaders http.Header, data io.Reader, output IBaseModel, xmlResult bool) (respError error) {
req, err := http.NewRequest(method, signedURL, data)
if err != nil {
return err
}
if obsClient.conf.ctx != nil {
req = req.WithContext(obsClient.conf.ctx)
}
var resp *http.Response

var isSecurityToken bool
var securityToken string
var query []string
urlParts := strings.Split(signedURL, "?")
if len(urlParts) > 1 {
query = strings.Split(urlParts[1], "&")
for _, value := range query {
if strings.HasPrefix(value, HEADER_STS_TOKEN_AMZ+"=") || strings.HasPrefix(value, HEADER_STS_TOKEN_OBS+"=") {
// Slice after the first '=' so the token offset is correct for either header name.
if token := value[strings.Index(value, "=")+1:]; token != "" {
securityToken = token
isSecurityToken = true
}
}
}
}
logSignedURL := signedURL
if isSecurityToken {
logSignedURL = strings.Replace(logSignedURL, securityToken, "******", -1)
}
doLog(LEVEL_INFO, "Do %s with signedUrl %s...", action, logSignedURL)

req.Header = actualSignedRequestHeaders
if value, ok := req.Header[HEADER_HOST_CAMEL]; ok {
req.Host = value[0]
delete(req.Header, HEADER_HOST_CAMEL)
} else if value, ok := req.Header[HEADER_HOST]; ok {
req.Host = value[0]
delete(req.Header, HEADER_HOST)
}

if value, ok := req.Header[HEADER_CONTENT_LENGTH_CAMEL]; ok {
req.ContentLength = StringToInt64(value[0], -1)
delete(req.Header, HEADER_CONTENT_LENGTH_CAMEL)
} else if value, ok := req.Header[HEADER_CONTENT_LENGTH]; ok {
req.ContentLength = StringToInt64(value[0], -1)
delete(req.Header, HEADER_CONTENT_LENGTH)
}

req.Header[HEADER_USER_AGENT_CAMEL] = []string{USER_AGENT}
start := GetCurrentTimestamp()
resp, err = obsClient.httpClient.Do(req)
if isInfoLogEnabled() {
doLog(LEVEL_INFO, "Do http request cost %d ms", (GetCurrentTimestamp() - start))
}

var msg interface{}
if err != nil {
respError = err
resp = nil
} else {
doLog(LEVEL_DEBUG, "Response headers: %v", resp.Header)
if resp.StatusCode >= 300 {
respError = ParseResponseToObsError(resp, obsClient.conf.signature == SignatureObs)
msg = resp.Status
resp = nil
} else {
if output != nil {
respError = ParseResponseToBaseModel(resp, output, xmlResult, obsClient.conf.signature == SignatureObs)
}
if respError != nil {
doLog(LEVEL_WARN, "Parse response to BaseModel with error: %v", respError)
}
}
}

if msg != nil {
doLog(LEVEL_ERROR, "Failed to send request with reason:%v", msg)
}

if isDebugLogEnabled() {
doLog(LEVEL_DEBUG, "End method %s, obsclient cost %d ms", action, (GetCurrentTimestamp() - start))
}

return
}

func (obsClient ObsClient) doHTTP(method, bucketName, objectKey string, params map[string]string,
headers map[string][]string, data interface{}, repeatable bool) (resp *http.Response, respError error) {

bucketName = strings.TrimSpace(bucketName)

method = strings.ToUpper(method)

var redirectURL string
var requestURL string
maxRetryCount := obsClient.conf.maxRetryCount
maxRedirectCount := obsClient.conf.maxRedirectCount

var _data io.Reader
if data != nil {
if dataStr, ok := data.(string); ok {
doLog(LEVEL_DEBUG, "Do http request with string: %s", dataStr)
headers["Content-Length"] = []string{IntToString(len(dataStr))}
_data = strings.NewReader(dataStr)
} else if dataByte, ok := data.([]byte); ok {
doLog(LEVEL_DEBUG, "Do http request with byte array")
headers["Content-Length"] = []string{IntToString(len(dataByte))}
_data = bytes.NewReader(dataByte)
} else if dataReader, ok := data.(io.Reader); ok {
_data = dataReader
} else {
doLog(LEVEL_WARN, "Data is not a valid io.Reader")
return nil, errors.New("Data is not a valid io.Reader")
}
}

var lastRequest *http.Request
redirectFlag := false
for i, redirectCount := 0, 0; i <= maxRetryCount; i++ {
if redirectURL != "" {
if !redirectFlag {
parsedRedirectURL, err := url.Parse(redirectURL)
if err != nil {
return nil, err
}
requestURL, err = obsClient.doAuth(method, bucketName, objectKey, params, headers, parsedRedirectURL.Host)
if err != nil {
return nil, err
}
if parsedRequestURL, err := url.Parse(requestURL); err != nil {
return nil, err
} else if parsedRequestURL.RawQuery != "" && parsedRedirectURL.RawQuery == "" {
redirectURL += "?" + parsedRequestURL.RawQuery
}
}
requestURL = redirectURL
} else {
var err error
requestURL, err = obsClient.doAuth(method, bucketName, objectKey, params, headers, "")
if err != nil {
return nil, err
}
}

req, err := http.NewRequest(method, requestURL, _data)
if err != nil {
return nil, err
}
if obsClient.conf.ctx != nil {
req = req.WithContext(obsClient.conf.ctx)
}
doLog(LEVEL_DEBUG, "Do request with url [%s] and method [%s]", requestURL, method)

if isDebugLogEnabled() {
auth := headers[HEADER_AUTH_CAMEL]
delete(headers, HEADER_AUTH_CAMEL)

var isSecurityToken bool
var securityToken []string
if securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_AMZ]; isSecurityToken {
headers[HEADER_STS_TOKEN_AMZ] = []string{"******"}
} else if securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_OBS]; isSecurityToken {
headers[HEADER_STS_TOKEN_OBS] = []string{"******"}
}
doLog(LEVEL_DEBUG, "Request headers: %v", headers)
headers[HEADER_AUTH_CAMEL] = auth
if isSecurityToken {
if obsClient.conf.signature == SignatureObs {
headers[HEADER_STS_TOKEN_OBS] = securityToken
} else {
headers[HEADER_STS_TOKEN_AMZ] = securityToken
}
}
}

for key, value := range headers {
if key == HEADER_HOST_CAMEL {
req.Host = value[0]
delete(headers, key)
} else if key == HEADER_CONTENT_LENGTH_CAMEL {
req.ContentLength = StringToInt64(value[0], -1)
delete(headers, key)
} else {
req.Header[key] = value
}
}

req.Header[HEADER_USER_AGENT_CAMEL] = []string{USER_AGENT}

// On retries the Host and Content-Length headers have already been consumed (and
// deleted) from the headers map, so carry them over from the previous request.
if lastRequest != nil {
req.Host = lastRequest.Host
req.ContentLength = lastRequest.ContentLength
}
lastRequest = req

start := GetCurrentTimestamp()
resp, err = obsClient.httpClient.Do(req)
if isInfoLogEnabled() {
doLog(LEVEL_INFO, "Do http request cost %d ms", (GetCurrentTimestamp() - start))
}

var msg interface{}
if err != nil {
msg = err
respError = err
resp = nil
if !repeatable {
break
}
} else {
doLog(LEVEL_DEBUG, "Response headers: %v", resp.Header)
if resp.StatusCode < 300 {
break
} else if !repeatable || (resp.StatusCode >= 400 && resp.StatusCode < 500) || resp.StatusCode == 304 {
respError = ParseResponseToObsError(resp, obsClient.conf.signature == SignatureObs)
resp = nil
break
} else if resp.StatusCode >= 300 && resp.StatusCode < 400 {
if location := resp.Header.Get(HEADER_LOCATION_CAMEL); location != "" && redirectCount < maxRedirectCount {
redirectURL = location
doLog(LEVEL_WARN, "Redirect request to %s", redirectURL)
msg = resp.Status
maxRetryCount++
redirectCount++
if resp.StatusCode == 302 && method == HTTP_GET {
redirectFlag = true
} else {
redirectFlag = false
}
} else {
respError = ParseResponseToObsError(resp, obsClient.conf.signature == SignatureObs)
resp = nil
break
}
} else {
msg = resp.Status
}
}
if i != maxRetryCount {
if resp != nil {
_err := resp.Body.Close()
if _err != nil {
doLog(LEVEL_WARN, "Failed to close resp body")
}
resp = nil
}
if _, ok := headers[HEADER_AUTH_CAMEL]; ok {
delete(headers, HEADER_AUTH_CAMEL)
}
doLog(LEVEL_WARN, "Failed to send request with reason:%v, will try again", msg)
if r, ok := _data.(*strings.Reader); ok {
_, err := r.Seek(0, 0)
if err != nil {
return nil, err
}
} else if r, ok := _data.(*bytes.Reader); ok {
_, err := r.Seek(0, 0)
if err != nil {
return nil, err
}
} else if r, ok := _data.(*fileReaderWrapper); ok {
fd, err := os.Open(r.filePath)
if err != nil {
return nil, err
}
defer func() {
errMsg := fd.Close()
if errMsg != nil {
doLog(LEVEL_WARN, "Failed to close with reason: %v", errMsg)
}
}()
fileReaderWrapper := &fileReaderWrapper{filePath: r.filePath}
fileReaderWrapper.mark = r.mark
fileReaderWrapper.reader = fd
fileReaderWrapper.totalCount = r.totalCount
_data = fileReaderWrapper
_, err = fd.Seek(r.mark, 0)
if err != nil {
return nil, err
}
} else if r, ok := _data.(*readerWrapper); ok {
_, err := r.seek(0, 0)
if err != nil {
return nil, err
}
}
// Jittered backoff: wait a random duration of up to (i+2) seconds before the next attempt.
time.Sleep(time.Duration(float64(i+2) * rand.Float64() * float64(time.Second)))
} else {
doLog(LEVEL_ERROR, "Failed to send request with reason:%v", msg)
if resp != nil {
respError = ParseResponseToObsError(resp, obsClient.conf.signature == SignatureObs)
resp = nil
}
}
}
return
}

type connDelegate struct {
conn net.Conn
socketTimeout time.Duration
finalTimeout time.Duration
}

func getConnDelegate(conn net.Conn, socketTimeout int, finalTimeout int) *connDelegate {
return &connDelegate{
conn: conn,
socketTimeout: time.Second * time.Duration(socketTimeout),
finalTimeout: time.Second * time.Duration(finalTimeout),
}
}

func (delegate *connDelegate) Read(b []byte) (n int, err error) {
setReadDeadlineErr := delegate.SetReadDeadline(time.Now().Add(delegate.socketTimeout))
flag := isDebugLogEnabled()

if setReadDeadlineErr != nil && flag {
doLog(LEVEL_DEBUG, "Failed to set read deadline with reason: %v, but it's ok", setReadDeadlineErr)
}

n, err = delegate.conn.Read(b)
setReadDeadlineErr = delegate.SetReadDeadline(time.Now().Add(delegate.finalTimeout))
if setReadDeadlineErr != nil && flag {
doLog(LEVEL_DEBUG, "Failed to set read deadline with reason: %v, but it's ok", setReadDeadlineErr)
}
return n, err
}

func (delegate *connDelegate) Write(b []byte) (n int, err error) {
setWriteDeadlineErr := delegate.SetWriteDeadline(time.Now().Add(delegate.socketTimeout))
flag := isDebugLogEnabled()
if setWriteDeadlineErr != nil && flag {
doLog(LEVEL_DEBUG, "Failed to set write deadline with reason: %v, but it's ok", setWriteDeadlineErr)
}

n, err = delegate.conn.Write(b)
finalTimeout := time.Now().Add(delegate.finalTimeout)
setWriteDeadlineErr = delegate.SetWriteDeadline(finalTimeout)
if setWriteDeadlineErr != nil && flag {
doLog(LEVEL_DEBUG, "Failed to set write deadline with reason: %v, but it's ok", setWriteDeadlineErr)
}
setReadDeadlineErr := delegate.SetReadDeadline(finalTimeout)
if setReadDeadlineErr != nil && flag {
doLog(LEVEL_DEBUG, "Failed to set read deadline with reason: %v, but it's ok", setReadDeadlineErr)
}
return n, err
}

func (delegate *connDelegate) Close() error {
return delegate.conn.Close()
}

func (delegate *connDelegate) LocalAddr() net.Addr {
return delegate.conn.LocalAddr()
}

func (delegate *connDelegate) RemoteAddr() net.Addr {
return delegate.conn.RemoteAddr()
}

func (delegate *connDelegate) SetDeadline(t time.Time) error {
return delegate.conn.SetDeadline(t)
}

func (delegate *connDelegate) SetReadDeadline(t time.Time) error {
return delegate.conn.SetReadDeadline(t)
}

func (delegate *connDelegate) SetWriteDeadline(t time.Time) error {
return delegate.conn.SetWriteDeadline(t)
}
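
// connDelegate refreshes deadlines around every I/O so idle connections time out. The
// client presumably installs it in the transport's dial hook (that wiring lives in
// client.go/conf.go, not shown); a sketch with illustrative timeout values:
func exampleDial(network, addr string) (net.Conn, error) {
conn, err := net.DialTimeout(network, addr, 30*time.Second)
if err != nil {
return nil, err
}
// 60s deadline around each Read/Write, relaxed to a 10s final deadline afterwards.
return getConnDelegate(conn, 60, 10), nil
}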

+ 317
- 0
modules/obs/log.go View File

@@ -0,0 +1,317 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

//nolint:golint, unused
package obs

import (
"fmt"
"log"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
)

// Level defines the level of the log
type Level int

const (
LEVEL_OFF Level = 500
LEVEL_ERROR Level = 400
LEVEL_WARN Level = 300
LEVEL_INFO Level = 200
LEVEL_DEBUG Level = 100
)

var logLevelMap = map[Level]string{
LEVEL_OFF: "[OFF]: ",
LEVEL_ERROR: "[ERROR]: ",
LEVEL_WARN: "[WARN]: ",
LEVEL_INFO: "[INFO]: ",
LEVEL_DEBUG: "[DEBUG]: ",
}

type logConfType struct {
level Level
logToConsole bool
logFullPath string
maxLogSize int64
backups int
}

func getDefaultLogConf() logConfType {
return logConfType{
level: LEVEL_WARN,
logToConsole: false,
logFullPath: "",
maxLogSize: 1024 * 1024 * 30, //30MB
backups: 10,
}
}

var logConf logConfType

type loggerWrapper struct {
fullPath string
fd *os.File
ch chan string
wg sync.WaitGroup
queue []string
logger *log.Logger
index int
cacheCount int
closed bool
}

func (lw *loggerWrapper) doInit() {
lw.queue = make([]string, 0, lw.cacheCount)
lw.logger = log.New(lw.fd, "", 0)
lw.ch = make(chan string, lw.cacheCount)
lw.wg.Add(1)
go lw.doWrite()
}

func (lw *loggerWrapper) rotate() {
stat, err := lw.fd.Stat()
if err != nil {
_err := lw.fd.Close()
if _err != nil {
doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err)
}
panic(err)
}
if stat.Size() >= logConf.maxLogSize {
_err := lw.fd.Sync()
if _err != nil {
panic(_err)
}
_err = lw.fd.Close()
if _err != nil {
doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err)
}
if lw.index > logConf.backups {
lw.index = 1
}
_err = os.Rename(lw.fullPath, lw.fullPath+"."+IntToString(lw.index))
if _err != nil {
panic(_err)
}
lw.index++

fd, err := os.OpenFile(lw.fullPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
if err != nil {
panic(err)
}
lw.fd = fd
lw.logger.SetOutput(lw.fd)
}
}

func (lw *loggerWrapper) doFlush() {
lw.rotate()
for _, m := range lw.queue {
lw.logger.Println(m)
}
err := lw.fd.Sync()
if err != nil {
panic(err)
}
}

func (lw *loggerWrapper) doClose() {
lw.closed = true
close(lw.ch)
lw.wg.Wait()
}

func (lw *loggerWrapper) doWrite() {
defer lw.wg.Done()
for {
msg, ok := <-lw.ch
if !ok {
lw.doFlush()
_err := lw.fd.Close()
if _err != nil {
doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err)
}
break
}
if len(lw.queue) >= lw.cacheCount {
lw.doFlush()
lw.queue = make([]string, 0, lw.cacheCount)
}
lw.queue = append(lw.queue, msg)
}

}

func (lw *loggerWrapper) Printf(format string, v ...interface{}) {
if !lw.closed {
msg := fmt.Sprintf(format, v...)
lw.ch <- msg
}
}

var consoleLogger *log.Logger
var fileLogger *loggerWrapper
var lock = new(sync.RWMutex)

func isDebugLogEnabled() bool {
return logConf.level <= LEVEL_DEBUG
}

func isErrorLogEnabled() bool {
return logConf.level <= LEVEL_ERROR
}

func isWarnLogEnabled() bool {
return logConf.level <= LEVEL_WARN
}

func isInfoLogEnabled() bool {
return logConf.level <= LEVEL_INFO
}

func reset() {
if fileLogger != nil {
fileLogger.doClose()
fileLogger = nil
}
consoleLogger = nil
logConf = getDefaultLogConf()
}

// InitLog enable logging function with default cacheCnt
func InitLog(logFullPath string, maxLogSize int64, backups int, level Level, logToConsole bool) error {
return InitLogWithCacheCnt(logFullPath, maxLogSize, backups, level, logToConsole, 50)
}

// InitLogWithCacheCnt enable logging function
func InitLogWithCacheCnt(logFullPath string, maxLogSize int64, backups int, level Level, logToConsole bool, cacheCnt int) error {
lock.Lock()
defer lock.Unlock()
if cacheCnt <= 0 {
cacheCnt = 50
}
reset()
if fullPath := strings.TrimSpace(logFullPath); fullPath != "" {
_fullPath, err := filepath.Abs(fullPath)
if err != nil {
return err
}

if !strings.HasSuffix(_fullPath, ".log") {
_fullPath += ".log"
}

stat, err := os.Stat(_fullPath)
if err == nil && stat.IsDir() {
return fmt.Errorf("logFullPath:[%s] is a directory", _fullPath)
} else if err = os.MkdirAll(filepath.Dir(_fullPath), os.ModePerm); err != nil {
return err
}

fd, err := os.OpenFile(_fullPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
if err != nil {
return err
}

if stat == nil {
stat, err = os.Stat(_fullPath)
if err != nil {
_err := fd.Close()
if _err != nil {
doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err)
}
return err
}
}

prefix := stat.Name() + "."
index := 1
var timeIndex int64 = 0
walkFunc := func(path string, info os.FileInfo, err error) error {
if err == nil {
if name := info.Name(); strings.HasPrefix(name, prefix) {
if i := StringToInt(name[len(prefix):], 0); i >= index && info.ModTime().Unix() >= timeIndex {
timeIndex = info.ModTime().Unix()
index = i + 1
}
}
}
return err
}

if err = filepath.Walk(filepath.Dir(_fullPath), walkFunc); err != nil {
_err := fd.Close()
if _err != nil {
doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err)
}
return err
}

fileLogger = &loggerWrapper{fullPath: _fullPath, fd: fd, index: index, cacheCount: cacheCnt, closed: false}
fileLogger.doInit()
}
if maxLogSize > 0 {
logConf.maxLogSize = maxLogSize
}
if backups > 0 {
logConf.backups = backups
}
logConf.level = level
if logToConsole {
consoleLogger = log.New(os.Stdout, "", log.LstdFlags)
}
return nil
}

// CloseLog disable logging and synchronize cache data to log files
func CloseLog() {
if logEnabled() {
lock.Lock()
defer lock.Unlock()
reset()
}
}

func logEnabled() bool {
return consoleLogger != nil || fileLogger != nil
}

// DoLog writes log messages to the logger
func DoLog(level Level, format string, v ...interface{}) {
doLog(level, format, v...)
}

func doLog(level Level, format string, v ...interface{}) {
if logEnabled() && logConf.level <= level {
msg := fmt.Sprintf(format, v...)
if _, file, line, ok := runtime.Caller(1); ok {
index := strings.LastIndex(file, "/")
if index >= 0 {
file = file[index+1:]
}
msg = fmt.Sprintf("%s:%d|%s", file, line, msg)
}
prefix := logLevelMap[level]
if consoleLogger != nil {
consoleLogger.Printf("%s%s", prefix, msg)
}
if fileLogger != nil {
nowDate := FormatUtcNow("2006-01-02T15:04:05Z")
fileLogger.Printf("%s %s%s", nowDate, prefix, msg)
}
}
}
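
// Typical wiring, using only the exported functions above; the log path and sizes are
// illustrative. InitLog appends ".log" to the path when the suffix is missing.
func exampleLogging() {
if err := InitLog("logs/obs-sdk", 10*1024*1024, 5, LEVEL_INFO, true); err != nil {
panic(err)
}
defer CloseLog()
DoLog(LEVEL_INFO, "uploaded %d parts", 4)
}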

+ 1236
- 0
modules/obs/model.go
File diff suppressed because it is too large
View File


+ 543
- 0
modules/obs/pool.go View File

@@ -0,0 +1,543 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

//nolint:structcheck, unused
//nolint:golint, unused
package obs

import (
"errors"
"fmt"
"runtime"
"sync"
"sync/atomic"
"time"
)

// Future defines interface with function: Get
type Future interface {
Get() interface{}
}

// FutureResult for task result
type FutureResult struct {
result interface{}
resultChan chan interface{}
lock sync.Mutex
}

type panicResult struct {
presult interface{}
}

func (f *FutureResult) checkPanic() interface{} {
if r, ok := f.result.(panicResult); ok {
panic(r.presult)
}
return f.result
}

// Get gets the task result
func (f *FutureResult) Get() interface{} {
if f.resultChan == nil {
return f.checkPanic()
}
f.lock.Lock()
defer f.lock.Unlock()
if f.resultChan == nil {
return f.checkPanic()
}

f.result = <-f.resultChan
close(f.resultChan)
f.resultChan = nil
return f.checkPanic()
}

// Task defines interface with function: Run
type Task interface {
Run() interface{}
}

type funcWrapper struct {
f func() interface{}
}

func (fw *funcWrapper) Run() interface{} {
if fw.f != nil {
return fw.f()
}
return nil
}

type taskWrapper struct {
t Task
f *FutureResult
}

func (tw *taskWrapper) Run() interface{} {
if tw.t != nil {
return tw.t.Run()
}
return nil
}

type signalTask struct {
id string
}

func (signalTask) Run() interface{} {
return nil
}

type worker struct {
name string
taskQueue chan Task
wg *sync.WaitGroup
pool *RoutinePool
}

func runTask(t Task) {
if tw, ok := t.(*taskWrapper); ok {
defer func() {
if r := recover(); r != nil {
tw.f.resultChan <- panicResult{
presult: r,
}
}
}()
ret := t.Run()
tw.f.resultChan <- ret
} else {
t.Run()
}
}

func (*worker) runTask(t Task) {
runTask(t)
}

func (w *worker) start() {
go func() {
defer func() {
if w.wg != nil {
w.wg.Done()
}
}()
for {
task, ok := <-w.taskQueue
if !ok {
break
}
w.pool.AddCurrentWorkingCnt(1)
w.runTask(task)
w.pool.AddCurrentWorkingCnt(-1)
if w.pool.autoTuneWorker(w) {
break
}
}
}()
}

func (w *worker) release() {
w.taskQueue = nil
w.wg = nil
w.pool = nil
}

// Pool defines coroutine pool interface
type Pool interface {
ShutDown()
Submit(t Task) (Future, error)
SubmitFunc(f func() interface{}) (Future, error)
Execute(t Task)
ExecuteFunc(f func() interface{})
GetMaxWorkerCnt() int64
AddMaxWorkerCnt(value int64) int64
GetCurrentWorkingCnt() int64
AddCurrentWorkingCnt(value int64) int64
GetWorkerCnt() int64
AddWorkerCnt(value int64) int64
EnableAutoTune()
}

type basicPool struct {
maxWorkerCnt int64
workerCnt int64
currentWorkingCnt int64
isShutDown int32
}

// ErrTaskInvalid will be returned if the task is nil
var ErrTaskInvalid = errors.New("Task is nil")

func (pool *basicPool) GetCurrentWorkingCnt() int64 {
return atomic.LoadInt64(&pool.currentWorkingCnt)
}

func (pool *basicPool) AddCurrentWorkingCnt(value int64) int64 {
return atomic.AddInt64(&pool.currentWorkingCnt, value)
}

func (pool *basicPool) GetWorkerCnt() int64 {
return atomic.LoadInt64(&pool.workerCnt)
}

func (pool *basicPool) AddWorkerCnt(value int64) int64 {
return atomic.AddInt64(&pool.workerCnt, value)
}

func (pool *basicPool) GetMaxWorkerCnt() int64 {
return atomic.LoadInt64(&pool.maxWorkerCnt)
}

func (pool *basicPool) AddMaxWorkerCnt(value int64) int64 {
return atomic.AddInt64(&pool.maxWorkerCnt, value)
}

func (pool *basicPool) CompareAndSwapCurrentWorkingCnt(oldValue, newValue int64) bool {
return atomic.CompareAndSwapInt64(&pool.currentWorkingCnt, oldValue, newValue)
}

func (pool *basicPool) EnableAutoTune() {

}

// RoutinePool defines the coroutine pool struct
type RoutinePool struct {
basicPool
taskQueue chan Task
dispatchQueue chan Task
workers map[string]*worker
cacheCnt int
wg *sync.WaitGroup
lock *sync.Mutex
shutDownWg *sync.WaitGroup
autoTune int32
}

// ErrSubmitTimeout will be returned if submit task timeout when calling SubmitWithTimeout function
var ErrSubmitTimeout = errors.New("Submit task timeout")

// ErrPoolShutDown will be returned if RoutinePool is shutdown
var ErrPoolShutDown = errors.New("RoutinePool is shutdown")

// ErrTaskReject will be returned if submit task is rejected
var ErrTaskReject = errors.New("Submit task is rejected")

var closeQueue = signalTask{id: "closeQueue"}

// NewRoutinePool creates a RoutinePool instance
func NewRoutinePool(maxWorkerCnt, cacheCnt int) Pool {
if maxWorkerCnt <= 0 {
maxWorkerCnt = runtime.NumCPU()
}

pool := &RoutinePool{
cacheCnt: cacheCnt,
wg: new(sync.WaitGroup),
lock: new(sync.Mutex),
shutDownWg: new(sync.WaitGroup),
autoTune: 0,
}
pool.isShutDown = 0
pool.maxWorkerCnt += int64(maxWorkerCnt)
if pool.cacheCnt <= 0 {
pool.taskQueue = make(chan Task)
} else {
pool.taskQueue = make(chan Task, pool.cacheCnt)
}
pool.workers = make(map[string]*worker, pool.maxWorkerCnt)
// dispatchQueue must be unbuffered so Execute/Submit block until the dispatcher receives the task
pool.dispatchQueue = make(chan Task)
pool.dispatcher()

return pool
}
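
// A minimal sketch of the submit/await cycle using only the exported API above.
func examplePool() {
pool := NewRoutinePool(4, 16)
defer pool.ShutDown()
futures := make([]Future, 0, 8)
for i := 0; i < 8; i++ {
i := i // capture the loop variable for the closure
f, err := pool.SubmitFunc(func() interface{} { return i * i })
if err != nil {
continue // e.g. ErrPoolShutDown
}
futures = append(futures, f)
}
for _, f := range futures {
_ = f.Get() // blocks until the task finishes; re-panics if the task panicked
}
}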

// EnableAutoTune sets the autoTune enabled
func (pool *RoutinePool) EnableAutoTune() {
atomic.StoreInt32(&pool.autoTune, 1)
}

func (pool *RoutinePool) checkStatus(t Task) error {
if t == nil {
return ErrTaskInvalid
}

if atomic.LoadInt32(&pool.isShutDown) == 1 {
return ErrPoolShutDown
}
return nil
}

func (pool *RoutinePool) dispatcher() {
pool.shutDownWg.Add(1)
go func() {
for {
task, ok := <-pool.dispatchQueue
if !ok {
break
}

if task == closeQueue {
close(pool.taskQueue)
pool.shutDownWg.Done()
continue
}

if pool.GetWorkerCnt() < pool.GetMaxWorkerCnt() {
pool.addWorker()
}

pool.taskQueue <- task
}
}()
}

// AddMaxWorkerCnt sets the maxWorkerCnt field's value and returns it
func (pool *RoutinePool) AddMaxWorkerCnt(value int64) int64 {
if atomic.LoadInt32(&pool.autoTune) == 1 {
return pool.basicPool.AddMaxWorkerCnt(value)
}
return pool.GetMaxWorkerCnt()
}

func (pool *RoutinePool) addWorker() {
if atomic.LoadInt32(&pool.autoTune) == 1 {
pool.lock.Lock()
defer pool.lock.Unlock()
}
w := &worker{}
w.name = fmt.Sprintf("woker-%d", len(pool.workers))
w.taskQueue = pool.taskQueue
w.wg = pool.wg
pool.AddWorkerCnt(1)
w.pool = pool
pool.workers[w.name] = w
pool.wg.Add(1)
w.start()
}

func (pool *RoutinePool) autoTuneWorker(w *worker) bool {
if atomic.LoadInt32(&pool.autoTune) == 0 {
return false
}

if w == nil {
return false
}

workerCnt := pool.GetWorkerCnt()
maxWorkerCnt := pool.GetMaxWorkerCnt()
if workerCnt > maxWorkerCnt && atomic.CompareAndSwapInt64(&pool.workerCnt, workerCnt, workerCnt-1) {
pool.lock.Lock()
defer pool.lock.Unlock()
delete(pool.workers, w.name)
w.wg.Done()
w.release()
return true
}

return false
}

// ExecuteFunc creates a funcWrapper instance with the specified function and calls the Execute function
func (pool *RoutinePool) ExecuteFunc(f func() interface{}) {
fw := &funcWrapper{
f: f,
}
pool.Execute(fw)
}

// Execute pushes the specified task to the dispatchQueue
func (pool *RoutinePool) Execute(t Task) {
if t != nil {
pool.dispatchQueue <- t
}
}

// SubmitFunc creates a funcWrapper instance with the specified function and calls the Submit function
func (pool *RoutinePool) SubmitFunc(f func() interface{}) (Future, error) {
fw := &funcWrapper{
f: f,
}
return pool.Submit(fw)
}

// Submit pushes the specified task to the dispatchQueue, and returns the FutureResult and error info
func (pool *RoutinePool) Submit(t Task) (Future, error) {
if err := pool.checkStatus(t); err != nil {
return nil, err
}
f := &FutureResult{}
f.resultChan = make(chan interface{}, 1)
tw := &taskWrapper{
t: t,
f: f,
}
pool.dispatchQueue <- tw
return f, nil
}

// SubmitWithTimeout pushes the specified task to the dispatchQueue, and returns the FutureResult and error info.
// Also takes a timeout in milliseconds; returns ErrSubmitTimeout if the task cannot be enqueued within that time.
func (pool *RoutinePool) SubmitWithTimeout(t Task, timeout int64) (Future, error) {
if timeout <= 0 {
return pool.Submit(t)
}
if err := pool.checkStatus(t); err != nil {
return nil, err
}
timeoutChan := make(chan bool, 1)
go func() {
time.Sleep(time.Millisecond * time.Duration(timeout))
timeoutChan <- true
close(timeoutChan)
}()

f := &FutureResult{}
f.resultChan = make(chan interface{}, 1)
tw := &taskWrapper{
t: t,
f: f,
}
select {
case pool.dispatchQueue <- tw:
return f, nil
case <-timeoutChan:
return nil, ErrSubmitTimeout
}
}

func (pool *RoutinePool) beforeCloseDispatchQueue() {
if !atomic.CompareAndSwapInt32(&pool.isShutDown, 0, 1) {
return
}
pool.dispatchQueue <- closeQueue
pool.wg.Wait()
}

func (pool *RoutinePool) doCloseDispatchQueue() {
close(pool.dispatchQueue)
pool.shutDownWg.Wait()
}

// ShutDown closes the RoutinePool instance
func (pool *RoutinePool) ShutDown() {
pool.beforeCloseDispatchQueue()
pool.doCloseDispatchQueue()
for _, w := range pool.workers {
w.release()
}
pool.workers = nil
pool.taskQueue = nil
pool.dispatchQueue = nil
}

// NoChanPool defines the coroutine pool struct
type NoChanPool struct {
basicPool
wg *sync.WaitGroup
tokens chan interface{}
}

// NewNochanPool creates a new NoChanPool instance
func NewNochanPool(maxWorkerCnt int) Pool {
if maxWorkerCnt <= 0 {
maxWorkerCnt = runtime.NumCPU()
}

pool := &NoChanPool{
wg: new(sync.WaitGroup),
tokens: make(chan interface{}, maxWorkerCnt),
}
pool.isShutDown = 0
pool.AddMaxWorkerCnt(int64(maxWorkerCnt))

for i := 0; i < maxWorkerCnt; i++ {
pool.tokens <- struct{}{}
}

return pool
}

func (pool *NoChanPool) acquire() {
<-pool.tokens
}

func (pool *NoChanPool) release() {
pool.tokens <- 1
}

func (pool *NoChanPool) execute(t Task) {
pool.wg.Add(1)
go func() {
pool.acquire()
defer func() {
pool.release()
pool.wg.Done()
}()
runTask(t)
}()
}

// ShutDown closes the NoChanPool instance
func (pool *NoChanPool) ShutDown() {
if !atomic.CompareAndSwapInt32(&pool.isShutDown, 0, 1) {
return
}
pool.wg.Wait()
}

// Execute executes the specified task
func (pool *NoChanPool) Execute(t Task) {
if t != nil {
pool.execute(t)
}
}

// ExecuteFunc creates a funcWrapper instance with the specified function and calls the Execute function
func (pool *NoChanPool) ExecuteFunc(f func() interface{}) {
fw := &funcWrapper{
f: f,
}
pool.Execute(fw)
}

// Submit executes the specified task, and returns the FutureResult and error info
func (pool *NoChanPool) Submit(t Task) (Future, error) {
if t == nil {
return nil, ErrTaskInvalid
}

f := &FutureResult{}
f.resultChan = make(chan interface{}, 1)
tw := &taskWrapper{
t: t,
f: f,
}

pool.execute(tw)
return f, nil
}

// SubmitFunc creates a funcWrapper instance with the specified function and calls the Submit function
func (pool *NoChanPool) SubmitFunc(f func() interface{}) (Future, error) {
fw := &funcWrapper{
f: f,
}
return pool.Submit(fw)
}
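
// Unlike RoutinePool, NoChanPool has no dispatcher or worker queue: every task runs in
// its own goroutine, throttled by the token channel. A short sketch:
func exampleNoChanPool() {
pool := NewNochanPool(2) // at most two tasks in flight at once
f, err := pool.SubmitFunc(func() interface{} { return "done" })
if err != nil {
return
}
_ = f.Get()
pool.ShutDown() // waits for in-flight tasks to finish
}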

+ 895
- 0
modules/obs/temporary.go View File

@@ -0,0 +1,895 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

//nolint:golint, unused
package obs

import (
"errors"
"fmt"
"io"
"net/http"
"os"
"strings"
"time"

"github.com/unknwon/com"
)

// CreateSignedUrl creates signed url with the specified CreateSignedUrlInput, and returns the CreateSignedUrlOutput and error
func (obsClient ObsClient) CreateSignedUrl(input *CreateSignedUrlInput) (output *CreateSignedUrlOutput, err error) {
if input == nil {
return nil, errors.New("CreateSignedUrlInput is nil")
}

params := make(map[string]string, len(input.QueryParams))
for key, value := range input.QueryParams {
params[key] = value
}

if input.SubResource != "" {
params[string(input.SubResource)] = ""
}

headers := make(map[string][]string, len(input.Headers))
for key, value := range input.Headers {
headers[key] = []string{value}
}

if input.Expires <= 0 {
input.Expires = 300
}

requestURL, err := obsClient.doAuthTemporary(string(input.Method), input.Bucket, input.Key, params, headers, int64(input.Expires))
if err != nil {
return nil, err
}

output = &CreateSignedUrlOutput{
SignedUrl: requestURL,
ActualSignedRequestHeaders: headers,
}
return
}
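
// A sketch of generating a pre-signed GET URL. The input/output field names come from
// the code above; the HttpMethodGet constant name is an assumption.
func exampleSignedURL(obsClient *ObsClient) {
input := &CreateSignedUrlInput{}
input.Method = HttpMethodGet // assumed constant for the GET method type
input.Bucket = "testopendata"
input.Key = "attachment/example.zip"
input.Expires = 600 // seconds; values <= 0 fall back to 300
out, err := obsClient.CreateSignedUrl(input)
if err != nil {
return
}
// out.SignedUrl can be handed straight to a browser; out.ActualSignedRequestHeaders
// must accompany the request when replayed through the *WithSignedUrl helpers below.
doLog(LEVEL_DEBUG, "signed url: %s", out.SignedUrl)
}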

func (obsClient ObsClient) isSecurityToken(params map[string]string) {
if obsClient.conf.securityProvider.securityToken != "" {
if obsClient.conf.signature == SignatureObs {
params[HEADER_STS_TOKEN_OBS] = obsClient.conf.securityProvider.securityToken
} else {
params[HEADER_STS_TOKEN_AMZ] = obsClient.conf.securityProvider.securityToken
}
}
}

// CreateBrowserBasedSignature gets the browser based signature with the specified CreateBrowserBasedSignatureInput,
// and returns the CreateBrowserBasedSignatureOutput and error
func (obsClient ObsClient) CreateBrowserBasedSignature(input *CreateBrowserBasedSignatureInput) (output *CreateBrowserBasedSignatureOutput, err error) {
if input == nil {
return nil, errors.New("CreateBrowserBasedSignatureInput is nil")
}

params := make(map[string]string, len(input.FormParams))
for key, value := range input.FormParams {
params[key] = value
}

date := time.Now().UTC()
shortDate := date.Format(SHORT_DATE_FORMAT)
longDate := date.Format(LONG_DATE_FORMAT)

credential, _ := getCredential(obsClient.conf.securityProvider.ak, obsClient.conf.region, shortDate)

if input.Expires <= 0 {
input.Expires = 300
}

expiration := date.Add(time.Second * time.Duration(input.Expires)).Format(ISO8601_DATE_FORMAT)
if obsClient.conf.signature == SignatureV4 {
params[PARAM_ALGORITHM_AMZ_CAMEL] = V4_HASH_PREFIX
params[PARAM_CREDENTIAL_AMZ_CAMEL] = credential
params[PARAM_DATE_AMZ_CAMEL] = longDate
}

obsClient.isSecurityToken(params)

matchAnyBucket := true
matchAnyKey := true
count := 5
if bucket := strings.TrimSpace(input.Bucket); bucket != "" {
params["bucket"] = bucket
matchAnyBucket = false
count--
}

if key := strings.TrimSpace(input.Key); key != "" {
params["key"] = key
matchAnyKey = false
count--
}

originPolicySlice := make([]string, 0, len(params)+count)
originPolicySlice = append(originPolicySlice, fmt.Sprintf("{\"expiration\":\"%s\",", expiration))
originPolicySlice = append(originPolicySlice, "\"conditions\":[")
for key, value := range params {
if _key := strings.TrimSpace(strings.ToLower(key)); _key != "" {
originPolicySlice = append(originPolicySlice, fmt.Sprintf("{\"%s\":\"%s\"},", _key, value))
}
}

if matchAnyBucket {
originPolicySlice = append(originPolicySlice, "[\"starts-with\", \"$bucket\", \"\"],")
}

if matchAnyKey {
originPolicySlice = append(originPolicySlice, "[\"starts-with\", \"$key\", \"\"],")
}

originPolicySlice = append(originPolicySlice, "]}")

originPolicy := strings.Join(originPolicySlice, "")
policy := Base64Encode([]byte(originPolicy))
var signature string
if obsClient.conf.signature == SignatureV4 {
signature = getSignature(policy, obsClient.conf.securityProvider.sk, obsClient.conf.region, shortDate)
} else {
signature = Base64Encode(HmacSha1([]byte(obsClient.conf.securityProvider.sk), []byte(policy)))
}

output = &CreateBrowserBasedSignatureOutput{
OriginPolicy: originPolicy,
Policy: policy,
Algorithm: params[PARAM_ALGORITHM_AMZ_CAMEL],
Credential: params[PARAM_CREDENTIAL_AMZ_CAMEL],
Date: params[PARAM_DATE_AMZ_CAMEL],
Signature: signature,
}
return
}

// ListBucketsWithSignedUrl lists buckets with the specified signed url and signed request headers
func (obsClient ObsClient) ListBucketsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListBucketsOutput, err error) {
output = &ListBucketsOutput{}
err = obsClient.doHTTPWithSignedURL("ListBuckets", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
}
return
}

// CreateBucketWithSignedUrl creates bucket with the specified signed url and signed request headers and data
func (obsClient ObsClient) CreateBucketWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
output = &BaseModel{}
err = obsClient.doHTTPWithSignedURL("CreateBucket", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
if err != nil {
output = nil
}
return
}

// DeleteBucketWithSignedUrl deletes bucket with the specified signed url and signed request headers
func (obsClient ObsClient) DeleteBucketWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
output = &BaseModel{}
err = obsClient.doHTTPWithSignedURL("DeleteBucket", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
}
return
}

// SetBucketStoragePolicyWithSignedUrl sets bucket storage class with the specified signed url and signed request headers and data
func (obsClient ObsClient) SetBucketStoragePolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
output = &BaseModel{}
err = obsClient.doHTTPWithSignedURL("SetBucketStoragePolicy", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
if err != nil {
output = nil
}
return
}

// GetBucketStoragePolicyWithSignedUrl gets bucket storage class with the specified signed url and signed request headers
func (obsClient ObsClient) GetBucketStoragePolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketStoragePolicyOutput, err error) {
output = &GetBucketStoragePolicyOutput{}
err = obsClient.doHTTPWithSignedURL("GetBucketStoragePolicy", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
}
return
}

// ListObjectsWithSignedUrl lists objects in a bucket with the specified signed url and signed request headers
func (obsClient ObsClient) ListObjectsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListObjectsOutput, err error) {
output = &ListObjectsOutput{}
err = obsClient.doHTTPWithSignedURL("ListObjects", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
} else {
if location, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok {
output.Location = location[0]
}
}
return
}

// ListVersionsWithSignedUrl lists versioning objects in a bucket with the specified signed url and signed request headers
func (obsClient ObsClient) ListVersionsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListVersionsOutput, err error) {
output = &ListVersionsOutput{}
err = obsClient.doHTTPWithSignedURL("ListVersions", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
} else {
if location, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok {
output.Location = location[0]
}
}
return
}

// ListMultipartUploadsWithSignedUrl lists the multipart uploads that are initialized but not combined or aborted in a
// specified bucket with the specified signed url and signed request headers
func (obsClient ObsClient) ListMultipartUploadsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListMultipartUploadsOutput, err error) {
output = &ListMultipartUploadsOutput{}
err = obsClient.doHTTPWithSignedURL("ListMultipartUploads", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
}
return
}

// SetBucketQuotaWithSignedUrl sets the bucket quota with the specified signed url and signed request headers and data
func (obsClient ObsClient) SetBucketQuotaWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
output = &BaseModel{}
err = obsClient.doHTTPWithSignedURL("SetBucketQuota", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
if err != nil {
output = nil
}
return
}

// GetBucketQuotaWithSignedUrl gets the bucket quota with the specified signed url and signed request headers
func (obsClient ObsClient) GetBucketQuotaWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketQuotaOutput, err error) {
output = &GetBucketQuotaOutput{}
err = obsClient.doHTTPWithSignedURL("GetBucketQuota", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
}
return
}

// HeadBucketWithSignedUrl checks whether a bucket exists with the specified signed url and signed request headers
func (obsClient ObsClient) HeadBucketWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
output = &BaseModel{}
err = obsClient.doHTTPWithSignedURL("HeadBucket", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
}
return
}

// HeadObjectWithSignedUrl checks whether an object exists with the specified signed url and signed request headers
func (obsClient ObsClient) HeadObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
output = &BaseModel{}
err = obsClient.doHTTPWithSignedURL("HeadObject", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
}
return
}

// GetBucketMetadataWithSignedUrl gets the metadata of a bucket with the specified signed url and signed request headers
func (obsClient ObsClient) GetBucketMetadataWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketMetadataOutput, err error) {
output = &GetBucketMetadataOutput{}
err = obsClient.doHTTPWithSignedURL("GetBucketMetadata", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
} else {
ParseGetBucketMetadataOutput(output)
}
return
}

// GetBucketStorageInfoWithSignedUrl gets storage information about a bucket with the specified signed url and signed request headers
func (obsClient ObsClient) GetBucketStorageInfoWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketStorageInfoOutput, err error) {
output = &GetBucketStorageInfoOutput{}
err = obsClient.doHTTPWithSignedURL("GetBucketStorageInfo", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
}
return
}

// GetBucketLocationWithSignedUrl gets the location of a bucket with the specified signed url and signed request headers
func (obsClient ObsClient) GetBucketLocationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketLocationOutput, err error) {
output = &GetBucketLocationOutput{}
err = obsClient.doHTTPWithSignedURL("GetBucketLocation", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
}
return
}

// SetBucketAclWithSignedUrl sets the bucket ACL with the specified signed url and signed request headers and data
func (obsClient ObsClient) SetBucketAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
output = &BaseModel{}
err = obsClient.doHTTPWithSignedURL("SetBucketAcl", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
if err != nil {
output = nil
}
return
}

// GetBucketAclWithSignedUrl gets the bucket ACL with the specified signed url and signed request headers
func (obsClient ObsClient) GetBucketAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketAclOutput, err error) {
output = &GetBucketAclOutput{}
err = obsClient.doHTTPWithSignedURL("GetBucketAcl", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
}
return
}

// SetBucketPolicyWithSignedUrl sets the bucket policy with the specified signed url and signed request headers and data
func (obsClient ObsClient) SetBucketPolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
output = &BaseModel{}
err = obsClient.doHTTPWithSignedURL("SetBucketPolicy", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
if err != nil {
output = nil
}
return
}

// GetBucketPolicyWithSignedUrl gets the bucket policy with the specified signed url and signed request headers
func (obsClient ObsClient) GetBucketPolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketPolicyOutput, err error) {
output = &GetBucketPolicyOutput{}
err = obsClient.doHTTPWithSignedURL("GetBucketPolicy", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, false)
if err != nil {
output = nil
}
return
}

// DeleteBucketPolicyWithSignedUrl deletes the bucket policy with the specified signed url and signed request headers
func (obsClient ObsClient) DeleteBucketPolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
output = &BaseModel{}
err = obsClient.doHTTPWithSignedURL("DeleteBucketPolicy", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
}
return
}

// SetBucketCorsWithSignedUrl sets CORS rules for a bucket with the specified signed url and signed request headers and data
func (obsClient ObsClient) SetBucketCorsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
output = &BaseModel{}
err = obsClient.doHTTPWithSignedURL("SetBucketCors", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
if err != nil {
output = nil
}
return
}

// GetBucketCorsWithSignedUrl gets CORS rules of a bucket with the specified signed url and signed request headers
func (obsClient ObsClient) GetBucketCorsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketCorsOutput, err error) {
output = &GetBucketCorsOutput{}
err = obsClient.doHTTPWithSignedURL("GetBucketCors", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
}
return
}

// DeleteBucketCorsWithSignedUrl deletes CORS rules of a bucket with the specified signed url and signed request headers
func (obsClient ObsClient) DeleteBucketCorsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
output = &BaseModel{}
err = obsClient.doHTTPWithSignedURL("DeleteBucketCors", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
}
return
}

// SetBucketVersioningWithSignedUrl sets the versioning status for a bucket with the specified signed url and signed request headers and data
func (obsClient ObsClient) SetBucketVersioningWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
output = &BaseModel{}
err = obsClient.doHTTPWithSignedURL("SetBucketVersioning", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
if err != nil {
output = nil
}
return
}

// GetBucketVersioningWithSignedUrl gets the versioning status of a bucket with the specified signed url and signed request headers
func (obsClient ObsClient) GetBucketVersioningWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketVersioningOutput, err error) {
output = &GetBucketVersioningOutput{}
err = obsClient.doHTTPWithSignedURL("GetBucketVersioning", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
}
return
}

// SetBucketWebsiteConfigurationWithSignedUrl sets website hosting for a bucket with the specified signed url and signed request headers and data
func (obsClient ObsClient) SetBucketWebsiteConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
output = &BaseModel{}
err = obsClient.doHTTPWithSignedURL("SetBucketWebsiteConfiguration", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
if err != nil {
output = nil
}
return
}

// GetBucketWebsiteConfigurationWithSignedUrl gets the website hosting settings of a bucket with the specified signed url and signed request headers
func (obsClient ObsClient) GetBucketWebsiteConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketWebsiteConfigurationOutput, err error) {
output = &GetBucketWebsiteConfigurationOutput{}
err = obsClient.doHTTPWithSignedURL("GetBucketWebsiteConfiguration", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
}
return
}

// DeleteBucketWebsiteConfigurationWithSignedUrl deletes the website hosting settings of a bucket with the specified signed url and signed request headers
func (obsClient ObsClient) DeleteBucketWebsiteConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
output = &BaseModel{}
err = obsClient.doHTTPWithSignedURL("DeleteBucketWebsiteConfiguration", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
}
return
}

// SetBucketLoggingConfigurationWithSignedUrl sets the bucket logging with the specified signed url and signed request headers and data
func (obsClient ObsClient) SetBucketLoggingConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
output = &BaseModel{}
err = obsClient.doHTTPWithSignedURL("SetBucketLoggingConfiguration", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
if err != nil {
output = nil
}
return
}

// GetBucketLoggingConfigurationWithSignedUrl gets the logging settings of a bucket with the specified signed url and signed request headers
func (obsClient ObsClient) GetBucketLoggingConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketLoggingConfigurationOutput, err error) {
output = &GetBucketLoggingConfigurationOutput{}
err = obsClient.doHTTPWithSignedURL("GetBucketLoggingConfiguration", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
}
return
}

// SetBucketLifecycleConfigurationWithSignedUrl sets lifecycle rules for a bucket with the specified signed url and signed request headers and data
func (obsClient ObsClient) SetBucketLifecycleConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
output = &BaseModel{}
err = obsClient.doHTTPWithSignedURL("SetBucketLifecycleConfiguration", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
if err != nil {
output = nil
}
return
}

// GetBucketLifecycleConfigurationWithSignedUrl gets lifecycle rules of a bucket with the specified signed url and signed request headers
func (obsClient ObsClient) GetBucketLifecycleConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketLifecycleConfigurationOutput, err error) {
output = &GetBucketLifecycleConfigurationOutput{}
err = obsClient.doHTTPWithSignedURL("GetBucketLifecycleConfiguration", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
}
return
}

// DeleteBucketLifecycleConfigurationWithSignedUrl deletes lifecycle rules of a bucket with the specified signed url and signed request headers
func (obsClient ObsClient) DeleteBucketLifecycleConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
output = &BaseModel{}
err = obsClient.doHTTPWithSignedURL("DeleteBucketLifecycleConfiguration", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
}
return
}

// SetBucketTaggingWithSignedUrl sets bucket tags with the specified signed url and signed request headers and data
func (obsClient ObsClient) SetBucketTaggingWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
output = &BaseModel{}
err = obsClient.doHTTPWithSignedURL("SetBucketTagging", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
if err != nil {
output = nil
}
return
}

// GetBucketTaggingWithSignedUrl gets bucket tags with the specified signed url and signed request headers
func (obsClient ObsClient) GetBucketTaggingWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketTaggingOutput, err error) {
output = &GetBucketTaggingOutput{}
err = obsClient.doHTTPWithSignedURL("GetBucketTagging", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
}
return
}

// DeleteBucketTaggingWithSignedUrl deletes bucket tags with the specified signed url and signed request headers
func (obsClient ObsClient) DeleteBucketTaggingWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
output = &BaseModel{}
err = obsClient.doHTTPWithSignedURL("DeleteBucketTagging", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
}
return
}

// SetBucketNotificationWithSignedUrl sets event notification for a bucket with the specified signed url and signed request headers and data
func (obsClient ObsClient) SetBucketNotificationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
output = &BaseModel{}
err = obsClient.doHTTPWithSignedURL("SetBucketNotification", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
if err != nil {
output = nil
}
return
}

// GetBucketNotificationWithSignedUrl gets event notification settings of a bucket with the specified signed url and signed request headers
func (obsClient ObsClient) GetBucketNotificationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketNotificationOutput, err error) {
output = &GetBucketNotificationOutput{}
err = obsClient.doHTTPWithSignedURL("GetBucketNotification", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
}
return
}

// DeleteObjectWithSignedUrl deletes an object with the specified signed url and signed request headers
func (obsClient ObsClient) DeleteObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *DeleteObjectOutput, err error) {
output = &DeleteObjectOutput{}
err = obsClient.doHTTPWithSignedURL("DeleteObject", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
} else {
ParseDeleteObjectOutput(output)
}
return
}

// DeleteObjectsWithSignedUrl deletes objects in a batch with the specified signed url and signed request headers and data
func (obsClient ObsClient) DeleteObjectsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *DeleteObjectsOutput, err error) {
output = &DeleteObjectsOutput{}
err = obsClient.doHTTPWithSignedURL("DeleteObjects", HTTP_POST, signedUrl, actualSignedRequestHeaders, data, output, true)
if err != nil {
output = nil
}
return
}

// SetObjectAclWithSignedUrl sets ACL for an object with the specified signed url and signed request headers and data
func (obsClient ObsClient) SetObjectAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
output = &BaseModel{}
err = obsClient.doHTTPWithSignedURL("SetObjectAcl", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
if err != nil {
output = nil
}
return
}

// GetObjectAclWithSignedUrl gets the ACL of an object with the specified signed url and signed request headers
func (obsClient ObsClient) GetObjectAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetObjectAclOutput, err error) {
output = &GetObjectAclOutput{}
err = obsClient.doHTTPWithSignedURL("GetObjectAcl", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
} else {
if versionID, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok {
output.VersionId = versionID[0]
}
}
return
}

// RestoreObjectWithSignedUrl restores an object with the specified signed url and signed request headers and data
func (obsClient ObsClient) RestoreObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
output = &BaseModel{}
err = obsClient.doHTTPWithSignedURL("RestoreObject", HTTP_POST, signedUrl, actualSignedRequestHeaders, data, output, true)
if err != nil {
output = nil
}
return
}

// GetObjectMetadataWithSignedUrl gets object metadata with the specified signed url and signed request headers
func (obsClient ObsClient) GetObjectMetadataWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetObjectMetadataOutput, err error) {
output = &GetObjectMetadataOutput{}
err = obsClient.doHTTPWithSignedURL("GetObjectMetadata", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
} else {
ParseGetObjectMetadataOutput(output)
}
return
}

// GetObjectWithSignedUrl downloads an object with the specified signed url and signed request headers
func (obsClient ObsClient) GetObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetObjectOutput, err error) {
output = &GetObjectOutput{}
err = obsClient.doHTTPWithSignedURL("GetObject", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
} else {
ParseGetObjectOutput(output)
}
return
}
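// Illustrative usage sketch (not part of the SDK): the signed URL and headers
// are expected to come from the CreateSignedUrl API earlier in this package;
// the bucket and key names below are hypothetical.
//
//  input := &CreateSignedUrlInput{}
//  input.Method = HTTP_GET
//  input.Bucket = "my-bucket"
//  input.Key = "dir/key.txt"
//  input.Expires = 300 // URL validity in seconds
//  signed, err := obsClient.CreateSignedUrl(input)
//  if err == nil {
//      out, err := obsClient.GetObjectWithSignedUrl(signed.SignedUrl, signed.ActualSignedRequestHeaders)
//      if err == nil {
//          defer out.Body.Close()
//          // consume out.Body here
//      }
//  }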

// PutObjectWithSignedUrl uploads an object to the specified bucket with the specified signed url and signed request headers and data
func (obsClient ObsClient) PutObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *PutObjectOutput, err error) {
output = &PutObjectOutput{}
err = obsClient.doHTTPWithSignedURL("PutObject", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
if err != nil {
output = nil
} else {
ParsePutObjectOutput(output)
}
return
}

// PutFileWithSignedUrl uploads a file to the specified bucket with the specified signed url and signed request headers and sourceFile path
func (obsClient ObsClient) PutFileWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, sourceFile string) (output *PutObjectOutput, err error) {
var data io.Reader
sourceFile = strings.TrimSpace(sourceFile)
if sourceFile != "" {
fd, _err := os.Open(sourceFile)
if _err != nil {
err = _err
return nil, err
}
defer func() {
errMsg := fd.Close()
if errMsg != nil {
doLog(LEVEL_WARN, "Failed to close file with reason: %v", errMsg)
}
}()

stat, _err := fd.Stat()
if _err != nil {
err = _err
return nil, err
}
fileReaderWrapper := &fileReaderWrapper{filePath: sourceFile}
fileReaderWrapper.reader = fd

// Prefer an explicit Content-Length from the signed headers; otherwise fall back to the file size.
var contentLength int64
if value, ok := actualSignedRequestHeaders[HEADER_CONTENT_LENGTH_CAMEL]; ok {
contentLength = StringToInt64(value[0], -1)
} else if value, ok := actualSignedRequestHeaders[HEADER_CONTENT_LENGTH]; ok {
contentLength = StringToInt64(value[0], -1)
} else {
contentLength = stat.Size()
}
if contentLength > stat.Size() {
return nil, errors.New("ContentLength is larger than fileSize")
}
fileReaderWrapper.totalCount = contentLength
data = fileReaderWrapper
}

output = &PutObjectOutput{}
err = obsClient.doHTTPWithSignedURL("PutObject", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
if err != nil {
output = nil
} else {
ParsePutObjectOutput(output)
}
return
}
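// Illustrative sketch (hypothetical values): uploading a local file through a
// pre-signed PUT URL. Any Content-Length present in the signed headers caps
// how many bytes of the file are sent, per the wrapper logic above.
//
//  output, err := obsClient.PutFileWithSignedUrl(signedUrl, hdrs, "/tmp/data.bin")
//  if err == nil {
//      doLog(LEVEL_INFO, "uploaded, ETag: %s", output.ETag)
//  }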

// CopyObjectWithSignedUrl creates a copy for an existing object with the specified signed url and signed request headers
func (obsClient ObsClient) CopyObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *CopyObjectOutput, err error) {
output = &CopyObjectOutput{}
err = obsClient.doHTTPWithSignedURL("CopyObject", HTTP_PUT, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
} else {
ParseCopyObjectOutput(output)
}
return
}

// AbortMultipartUploadWithSignedUrl aborts a multipart upload in a specified bucket by using the multipart upload ID with the specified signed url and signed request headers
func (obsClient ObsClient) AbortMultipartUploadWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
output = &BaseModel{}
err = obsClient.doHTTPWithSignedURL("AbortMultipartUpload", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
}
return
}

// InitiateMultipartUploadWithSignedUrl initializes a multipart upload with the specified signed url and signed request headers
func (obsClient ObsClient) InitiateMultipartUploadWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *InitiateMultipartUploadOutput, err error) {
output = &InitiateMultipartUploadOutput{}
err = obsClient.doHTTPWithSignedURL("InitiateMultipartUpload", HTTP_POST, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
} else {
ParseInitiateMultipartUploadOutput(output)
}
return
}

// UploadPartWithSignedUrl uploads a part to a specified bucket by using a specified multipart upload ID
// with the specified signed url and signed request headers and data
func (obsClient ObsClient) UploadPartWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *UploadPartOutput, err error) {
output = &UploadPartOutput{}
err = obsClient.doHTTPWithSignedURL("UploadPart", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
if err != nil {
output = nil
} else {
ParseUploadPartOutput(output)
}
return
}

// CompleteMultipartUploadWithSignedUrl combines the uploaded parts in a specified bucket by using the multipart upload ID
// with the specified signed url and signed request headers and data
func (obsClient ObsClient) CompleteMultipartUploadWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *CompleteMultipartUploadOutput, err error) {
output = &CompleteMultipartUploadOutput{}
err = obsClient.doHTTPWithSignedURL("CompleteMultipartUpload", HTTP_POST, signedUrl, actualSignedRequestHeaders, data, output, true)
if err != nil {
output = nil
} else {
ParseCompleteMultipartUploadOutput(output)
}
return
}

// ListPartsWithSignedUrl lists the uploaded parts in a bucket by using the multipart upload ID with the specified signed url and signed request headers
func (obsClient ObsClient) ListPartsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListPartsOutput, err error) {
output = &ListPartsOutput{}
err = obsClient.doHTTPWithSignedURL("ListParts", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
}
return
}

// CopyPartWithSignedUrl copies a part to a specified bucket by using a specified multipart upload ID with the specified signed url and signed request headers
func (obsClient ObsClient) CopyPartWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *CopyPartOutput, err error) {
output = &CopyPartOutput{}
err = obsClient.doHTTPWithSignedURL("CopyPart", HTTP_PUT, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
} else {
ParseCopyPartOutput(output)
}
return
}

// SetBucketRequestPaymentWithSignedUrl sets requester-pays setting for a bucket with the specified signed url and signed request headers and data
func (obsClient ObsClient) SetBucketRequestPaymentWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
output = &BaseModel{}
err = obsClient.doHTTPWithSignedURL("SetBucketRequestPayment", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
if err != nil {
output = nil
}
return
}

// GetBucketRequestPaymentWithSignedUrl gets requester-pays setting of a bucket with the specified signed url and signed request headers
func (obsClient ObsClient) GetBucketRequestPaymentWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketRequestPaymentOutput, err error) {
output = &GetBucketRequestPaymentOutput{}
err = obsClient.doHTTPWithSignedURL("GetBucketRequestPayment", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
if err != nil {
output = nil
}
return
}


// CreateUploadPartSignedUrl builds a pre-signed, ready-to-send *http.Request
// for uploading one part of a multipart upload; the caller attaches the body.
func (obsClient ObsClient) CreateUploadPartSignedUrl(bucketName, objectKey, uploadId string, partNumber int, partSize int64) (*http.Request, error) {
var req *http.Request

input := &UploadPartInput{}
input.Bucket = bucketName
input.Key = objectKey
input.PartNumber = partNumber
input.UploadId = uploadId
input.PartSize = partSize
// The remaining UploadPartInput fields (ContentMD5, SourceFile, Offset,
// SseHeader, Body) are intentionally left unset; only the metadata above is
// needed to sign the request.

params, headers, _, err := input.trans(obsClient.conf.signature == SignatureObs)
if err != nil {
return req, err
}

if params == nil {
params = make(map[string]string)
}

if headers == nil {
headers = make(map[string][]string)
}

// No extension options are supplied on this path, so the loop below never
// executes; it is kept in step with the generic request path.
var extensions []extensionOptions
for _, extension := range extensions {
if extensionHeader, ok := extension.(extensionHeaders); ok {
_err := extensionHeader(headers, obsClient.conf.signature == SignatureObs)
if _err != nil {
doLog(LEVEL_WARN, fmt.Sprintf("set header with error: %v", _err))
}
} else {
doLog(LEVEL_WARN, "Unsupported extensionOptions")
}
}

headers["Content-Length"] = []string{com.ToStr(partNumber,10)}

requestURL, err := obsClient.doAuth(HTTP_PUT, bucketName, objectKey, params, headers, "")
if err != nil {
return req, err
}

var _data io.Reader
req, err = http.NewRequest(HTTP_PUT, requestURL, _data)
if err != nil {
return req, err
}
if obsClient.conf.ctx != nil {
req = req.WithContext(obsClient.conf.ctx)
}

if isDebugLogEnabled() {
auth := headers[HEADER_AUTH_CAMEL]
delete(headers, HEADER_AUTH_CAMEL)

var isSecurityToken bool
var securityToken []string
if securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_AMZ]; isSecurityToken {
headers[HEADER_STS_TOKEN_AMZ] = []string{"******"}
} else if securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_OBS]; isSecurityToken {
headers[HEADER_STS_TOKEN_OBS] = []string{"******"}
}
doLog(LEVEL_DEBUG, "Request headers: %v", headers)
headers[HEADER_AUTH_CAMEL] = auth
if isSecurityToken {
if obsClient.conf.signature == SignatureObs {
headers[HEADER_STS_TOKEN_OBS] = securityToken
} else {
headers[HEADER_STS_TOKEN_AMZ] = securityToken
}
}
}

for key, value := range headers {
if key == HEADER_HOST_CAMEL {
req.Host = value[0]
delete(headers, key)
} else if key == HEADER_CONTENT_LENGTH_CAMEL {
req.ContentLength = StringToInt64(value[0], -1)
delete(headers, key)
} else {
req.Header[key] = value
}
}

req.Header[HEADER_USER_AGENT_CAMEL] = []string{USER_AGENT}

return req, nil
}
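// Illustrative sketch (hypothetical names; uploadId comes from
// InitiateMultipartUpload): the returned request is already signed, so the
// caller only attaches the part body before sending it with a plain
// http.Client.
//
//  partData := []byte("...") // one part's bytes
//  req, err := obsClient.CreateUploadPartSignedUrl("my-bucket", "dir/key.bin", uploadId, 1, int64(len(partData)))
//  if err == nil {
//      req.Body = ioutil.NopCloser(bytes.NewReader(partData))
//      resp, err := http.DefaultClient.Do(req)
//      if err == nil {
//          defer resp.Body.Close() // a 200 response carries the part's ETag header
//      }
//  }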

+ 909
- 0
modules/obs/trait.go View File

@@ -0,0 +1,909 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

//nolint:structcheck, golint, unused
package obs

import (
"bytes"
"fmt"
"io"
"os"
"strings"
)

// IReadCloser defines interface with function: setReadCloser
type IReadCloser interface {
setReadCloser(body io.ReadCloser)
}

func (output *GetObjectOutput) setReadCloser(body io.ReadCloser) {
output.Body = body
}

func setHeaders(headers map[string][]string, header string, headerValue []string, isObs bool) {
if isObs {
header = HEADER_PREFIX_OBS + header
headers[header] = headerValue
} else {
header = HEADER_PREFIX + header
headers[header] = headerValue
}
}

func setHeadersNext(headers map[string][]string, header string, headerNext string, headerValue []string, isObs bool) {
if isObs {
headers[header] = headerValue
} else {
headers[headerNext] = headerValue
}
}
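// For reference (prefix values assumed from this package's constants):
// setHeaders prepends "x-obs-" to the header name when isObs is true and
// "x-amz-" otherwise, while setHeadersNext picks between two fully spelled
// names. For example:
//
//  setHeaders(h, HEADER_ACL, []string{"private"}, true)  // h["x-obs-acl"] = ["private"]
//  setHeaders(h, HEADER_ACL, []string{"private"}, false) // h["x-amz-acl"] = ["private"]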

// IBaseModel defines interface for base response model
type IBaseModel interface {
setStatusCode(statusCode int)

setRequestID(requestID string)

setResponseHeaders(responseHeaders map[string][]string)
}

// ISerializable defines interface with function: trans
type ISerializable interface {
trans(isObs bool) (map[string]string, map[string][]string, interface{}, error)
}

// DefaultSerializable defines default serializable struct
type DefaultSerializable struct {
params map[string]string
headers map[string][]string
data interface{}
}

func (input DefaultSerializable) trans(isObs bool) (map[string]string, map[string][]string, interface{}, error) {
return input.params, input.headers, input.data, nil
}

var defaultSerializable = &DefaultSerializable{}

func newSubResourceSerial(subResource SubResourceType) *DefaultSerializable {
return &DefaultSerializable{map[string]string{string(subResource): ""}, nil, nil}
}

func trans(subResource SubResourceType, input interface{}) (params map[string]string, headers map[string][]string, data interface{}, err error) {
params = map[string]string{string(subResource): ""}
data, err = ConvertRequestToIoReader(input)
return
}

func (baseModel *BaseModel) setStatusCode(statusCode int) {
baseModel.StatusCode = statusCode
}

func (baseModel *BaseModel) setRequestID(requestID string) {
baseModel.RequestId = requestID
}

func (baseModel *BaseModel) setResponseHeaders(responseHeaders map[string][]string) {
baseModel.ResponseHeaders = responseHeaders
}

func (input ListBucketsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
headers = make(map[string][]string)
if input.QueryLocation && !isObs {
setHeaders(headers, HEADER_LOCATION_AMZ, []string{"true"}, isObs)
}
return
}

func (input CreateBucketInput) prepareGrantHeaders(headers map[string][]string, isObs bool) {
if grantReadID := input.GrantReadId; grantReadID != "" {
setHeaders(headers, HEADER_GRANT_READ_OBS, []string{grantReadID}, isObs)
}
if grantWriteID := input.GrantWriteId; grantWriteID != "" {
setHeaders(headers, HEADER_GRANT_WRITE_OBS, []string{grantWriteID}, isObs)
}
if grantReadAcpID := input.GrantReadAcpId; grantReadAcpID != "" {
setHeaders(headers, HEADER_GRANT_READ_ACP_OBS, []string{grantReadAcpID}, isObs)
}
if grantWriteAcpID := input.GrantWriteAcpId; grantWriteAcpID != "" {
setHeaders(headers, HEADER_GRANT_WRITE_ACP_OBS, []string{grantWriteAcpID}, isObs)
}
if grantFullControlID := input.GrantFullControlId; grantFullControlID != "" {
setHeaders(headers, HEADER_GRANT_FULL_CONTROL_OBS, []string{grantFullControlID}, isObs)
}
if grantReadDeliveredID := input.GrantReadDeliveredId; grantReadDeliveredID != "" {
setHeaders(headers, HEADER_GRANT_READ_DELIVERED_OBS, []string{grantReadDeliveredID}, true)
}
if grantFullControlDeliveredID := input.GrantFullControlDeliveredId; grantFullControlDeliveredID != "" {
setHeaders(headers, HEADER_GRANT_FULL_CONTROL_DELIVERED_OBS, []string{grantFullControlDeliveredID}, true)
}
}

func (input CreateBucketInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
headers = make(map[string][]string)
if acl := string(input.ACL); acl != "" {
setHeaders(headers, HEADER_ACL, []string{acl}, isObs)
}
if storageClass := string(input.StorageClass); storageClass != "" {
if !isObs {
if storageClass == string(StorageClassWarm) {
storageClass = string(storageClassStandardIA)
} else if storageClass == string(StorageClassCold) {
storageClass = string(storageClassGlacier)
}
}
setHeadersNext(headers, HEADER_STORAGE_CLASS_OBS, HEADER_STORAGE_CLASS, []string{storageClass}, isObs)
}
if epid := input.Epid; epid != "" {
setHeaders(headers, HEADER_EPID_HEADERS, []string{epid}, isObs)
}
if availableZone := input.AvailableZone; availableZone != "" {
setHeaders(headers, HEADER_AZ_REDUNDANCY, []string{availableZone}, isObs)
}

input.prepareGrantHeaders(headers, isObs)
if location := strings.TrimSpace(input.Location); location != "" {
input.Location = location

xml := make([]string, 0, 3)
xml = append(xml, "<CreateBucketConfiguration>")
if isObs {
xml = append(xml, fmt.Sprintf("<Location>%s</Location>", input.Location))
} else {
xml = append(xml, fmt.Sprintf("<LocationConstraint>%s</LocationConstraint>", input.Location))
}
xml = append(xml, "</CreateBucketConfiguration>")

data = strings.Join(xml, "")
}
return
}
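// For example (hypothetical region), with the S3-compatible signature
// (isObs == false) and Location "region-1", the body built above is:
//
//  <CreateBucketConfiguration><LocationConstraint>region-1</LocationConstraint></CreateBucketConfiguration>
//
// whereas the OBS signature emits <Location>region-1</Location> instead.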

func (input SetBucketStoragePolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
xml := make([]string, 0, 1)
if !isObs {
storageClass := "STANDARD"
if input.StorageClass == StorageClassWarm {
storageClass = string(storageClassStandardIA)
} else if input.StorageClass == StorageClassCold {
storageClass = string(storageClassGlacier)
}
params = map[string]string{string(SubResourceStoragePolicy): ""}
xml = append(xml, fmt.Sprintf("<StoragePolicy><DefaultStorageClass>%s</DefaultStorageClass></StoragePolicy>", storageClass))
} else {
if input.StorageClass != StorageClassWarm && input.StorageClass != StorageClassCold {
input.StorageClass = StorageClassStandard
}
params = map[string]string{string(SubResourceStorageClass): ""}
xml = append(xml, fmt.Sprintf("<StorageClass>%s</StorageClass>", input.StorageClass))
}
data = strings.Join(xml, "")
return
}
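// The two branches above produce different subresources and payloads.
// Sketch for StorageClassWarm (constant values assumed from this package):
//
//  isObs == false: ?storagePolicy with body
//      <StoragePolicy><DefaultStorageClass>STANDARD_IA</DefaultStorageClass></StoragePolicy>
//  isObs == true:  ?storageClass with body
//      <StorageClass>WARM</StorageClass>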

func (input ListObjsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
params = make(map[string]string)
if input.Prefix != "" {
params["prefix"] = input.Prefix
}
if input.Delimiter != "" {
params["delimiter"] = input.Delimiter
}
if input.MaxKeys > 0 {
params["max-keys"] = IntToString(input.MaxKeys)
}
headers = make(map[string][]string)
if origin := strings.TrimSpace(input.Origin); origin != "" {
headers[HEADER_ORIGIN_CAMEL] = []string{origin}
}
if requestHeader := strings.TrimSpace(input.RequestHeader); requestHeader != "" {
headers[HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL] = []string{requestHeader}
}
return
}

func (input ListObjectsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
params, headers, data, err = input.ListObjsInput.trans(isObs)
if err != nil {
return
}
if input.Marker != "" {
params["marker"] = input.Marker
}
return
}

func (input ListVersionsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
params, headers, data, err = input.ListObjsInput.trans(isObs)
if err != nil {
return
}
params[string(SubResourceVersions)] = ""
if input.KeyMarker != "" {
params["key-marker"] = input.KeyMarker
}
if input.VersionIdMarker != "" {
params["version-id-marker"] = input.VersionIdMarker
}
return
}

func (input ListMultipartUploadsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
params = map[string]string{string(SubResourceUploads): ""}
if input.Prefix != "" {
params["prefix"] = input.Prefix
}
if input.Delimiter != "" {
params["delimiter"] = input.Delimiter
}
if input.MaxUploads > 0 {
params["max-uploads"] = IntToString(input.MaxUploads)
}
if input.KeyMarker != "" {
params["key-marker"] = input.KeyMarker
}
if input.UploadIdMarker != "" {
params["upload-id-marker"] = input.UploadIdMarker
}
return
}

func (input SetBucketQuotaInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
return trans(SubResourceQuota, input)
}

func (input SetBucketAclInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
params = map[string]string{string(SubResourceAcl): ""}
headers = make(map[string][]string)

if acl := string(input.ACL); acl != "" {
setHeaders(headers, HEADER_ACL, []string{acl}, isObs)
} else {
data, _ = convertBucketACLToXML(input.AccessControlPolicy, false, isObs)
}
return
}

func (input SetBucketPolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
params = map[string]string{string(SubResourcePolicy): ""}
data = strings.NewReader(input.Policy)
return
}

func (input SetBucketCorsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
params = map[string]string{string(SubResourceCors): ""}
data, md5, err := ConvertRequestToIoReaderV2(input)
if err != nil {
return
}
headers = map[string][]string{HEADER_MD5_CAMEL: {md5}}
return
}

func (input SetBucketVersioningInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
return trans(SubResourceVersioning, input)
}

func (input SetBucketWebsiteConfigurationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
params = map[string]string{string(SubResourceWebsite): ""}
data, _ = ConvertWebsiteConfigurationToXml(input.BucketWebsiteConfiguration, false)
return
}

func (input GetBucketMetadataInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
headers = make(map[string][]string)
if origin := strings.TrimSpace(input.Origin); origin != "" {
headers[HEADER_ORIGIN_CAMEL] = []string{origin}
}
if requestHeader := strings.TrimSpace(input.RequestHeader); requestHeader != "" {
headers[HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL] = []string{requestHeader}
}
return
}

func (input SetBucketLoggingConfigurationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
params = map[string]string{string(SubResourceLogging): ""}
data, _ = ConvertLoggingStatusToXml(input.BucketLoggingStatus, false, isObs)
return
}

func (input SetBucketLifecycleConfigurationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
params = map[string]string{string(SubResourceLifecycle): ""}
data, md5 := ConvertLifecyleConfigurationToXml(input.BucketLifecyleConfiguration, true, isObs)
headers = map[string][]string{HEADER_MD5_CAMEL: {md5}}
return
}

func (input SetBucketTaggingInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
params = map[string]string{string(SubResourceTagging): ""}
data, md5, err := ConvertRequestToIoReaderV2(input)
if err != nil {
return
}
headers = map[string][]string{HEADER_MD5_CAMEL: {md5}}
return
}

func (input SetBucketNotificationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
params = map[string]string{string(SubResourceNotification): ""}
data, _ = ConvertNotificationToXml(input.BucketNotification, false, isObs)
return
}

func (input DeleteObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
params = make(map[string]string)
if input.VersionId != "" {
params[PARAM_VERSION_ID] = input.VersionId
}
return
}

func (input DeleteObjectsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
params = map[string]string{string(SubResourceDelete): ""}
data, md5, err := ConvertRequestToIoReaderV2(input)
if err != nil {
return
}
headers = map[string][]string{HEADER_MD5_CAMEL: {md5}}
return
}

func (input SetObjectAclInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
params = map[string]string{string(SubResourceAcl): ""}
if input.VersionId != "" {
params[PARAM_VERSION_ID] = input.VersionId
}
headers = make(map[string][]string)
if acl := string(input.ACL); acl != "" {
setHeaders(headers, HEADER_ACL, []string{acl}, isObs)
} else {
data, _ = ConvertAclToXml(input.AccessControlPolicy, false, isObs)
}
return
}

func (input GetObjectAclInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
params = map[string]string{string(SubResourceAcl): ""}
if input.VersionId != "" {
params[PARAM_VERSION_ID] = input.VersionId
}
return
}

func (input RestoreObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
params = map[string]string{string(SubResourceRestore): ""}
if input.VersionId != "" {
params[PARAM_VERSION_ID] = input.VersionId
}
if !isObs {
data, err = ConvertRequestToIoReader(input)
} else {
data = ConverntObsRestoreToXml(input)
}
return
}

// GetEncryption gets the Encryption field value from SseKmsHeader
func (header SseKmsHeader) GetEncryption() string {
if header.Encryption != "" {
return header.Encryption
}
if !header.isObs {
return DEFAULT_SSE_KMS_ENCRYPTION
}
return DEFAULT_SSE_KMS_ENCRYPTION_OBS
}

// GetKey gets the Key field value from SseKmsHeader
func (header SseKmsHeader) GetKey() string {
return header.Key
}

// GetEncryption gets the Encryption field value from SseCHeader
func (header SseCHeader) GetEncryption() string {
if header.Encryption != "" {
return header.Encryption
}
return DEFAULT_SSE_C_ENCRYPTION
}

// GetKey gets the Key field value from SseCHeader
func (header SseCHeader) GetKey() string {
return header.Key
}

// GetKeyMD5 gets the KeyMD5 field value from SseCHeader
func (header SseCHeader) GetKeyMD5() string {
if header.KeyMD5 != "" {
return header.KeyMD5
}

if ret, err := Base64Decode(header.GetKey()); err == nil {
return Base64Md5(ret)
}
return ""
}
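// Sketch of the fallback above: when KeyMD5 is not supplied, the key is
// treated as base64-encoded; it is decoded and the MD5 of the raw bytes is
// returned base64-encoded (Base64Decode, Base64Md5 and Base64Encode are
// helpers in this package; rawKey is hypothetical).
//
//  h := SseCHeader{Key: Base64Encode(rawKey)} // rawKey: 32 random bytes
//  md5 := h.GetKeyMD5()                       // equals Base64Md5(rawKey)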

func setSseHeader(headers map[string][]string, sseHeader ISseHeader, sseCOnly bool, isObs bool) {
if sseHeader != nil {
if sseCHeader, ok := sseHeader.(SseCHeader); ok {
setHeaders(headers, HEADER_SSEC_ENCRYPTION, []string{sseCHeader.GetEncryption()}, isObs)
setHeaders(headers, HEADER_SSEC_KEY, []string{sseCHeader.GetKey()}, isObs)
setHeaders(headers, HEADER_SSEC_KEY_MD5, []string{sseCHeader.GetKeyMD5()}, isObs)
} else if sseKmsHeader, ok := sseHeader.(SseKmsHeader); !sseCOnly && ok {
sseKmsHeader.isObs = isObs
setHeaders(headers, HEADER_SSEKMS_ENCRYPTION, []string{sseKmsHeader.GetEncryption()}, isObs)
if sseKmsHeader.GetKey() != "" {
setHeadersNext(headers, HEADER_SSEKMS_KEY_OBS, HEADER_SSEKMS_KEY_AMZ, []string{sseKmsHeader.GetKey()}, isObs)
}
}
}
}

func (input GetObjectMetadataInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
params = make(map[string]string)
if input.VersionId != "" {
params[PARAM_VERSION_ID] = input.VersionId
}
headers = make(map[string][]string)

if input.Origin != "" {
headers[HEADER_ORIGIN_CAMEL] = []string{input.Origin}
}

if input.RequestHeader != "" {
headers[HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL] = []string{input.RequestHeader}
}
setSseHeader(headers, input.SseHeader, true, isObs)
return
}

func (input SetObjectMetadataInput) prepareContentHeaders(headers map[string][]string) {
if input.ContentDisposition != "" {
headers[HEADER_CONTENT_DISPOSITION_CAMEL] = []string{input.ContentDisposition}
}
if input.ContentEncoding != "" {
headers[HEADER_CONTENT_ENCODING_CAMEL] = []string{input.ContentEncoding}
}
if input.ContentLanguage != "" {
headers[HEADER_CONTENT_LANGUAGE_CAMEL] = []string{input.ContentLanguage}
}

if input.ContentType != "" {
headers[HEADER_CONTENT_TYPE_CAML] = []string{input.ContentType}
}
}

func (input SetObjectMetadataInput) prepareStorageClass(headers map[string][]string, isObs bool) {
if storageClass := string(input.StorageClass); storageClass != "" {
if !isObs {
if storageClass == string(StorageClassWarm) {
storageClass = string(storageClassStandardIA)
} else if storageClass == string(StorageClassCold) {
storageClass = string(storageClassGlacier)
}
}
setHeaders(headers, HEADER_STORAGE_CLASS2, []string{storageClass}, isObs)
}
}

func (input SetObjectMetadataInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
params = map[string]string{string(SubResourceMetadata): ""}
if input.VersionId != "" {
params[PARAM_VERSION_ID] = input.VersionId
}
headers = make(map[string][]string)

if directive := string(input.MetadataDirective); directive != "" {
setHeaders(headers, HEADER_METADATA_DIRECTIVE, []string{string(input.MetadataDirective)}, isObs)
} else {
setHeaders(headers, HEADER_METADATA_DIRECTIVE, []string{string(ReplaceNew)}, isObs)
}
if input.CacheControl != "" {
headers[HEADER_CACHE_CONTROL_CAMEL] = []string{input.CacheControl}
}
input.prepareContentHeaders(headers)
if input.Expires != "" {
headers[HEADER_EXPIRES_CAMEL] = []string{input.Expires}
}
if input.WebsiteRedirectLocation != "" {
setHeaders(headers, HEADER_WEBSITE_REDIRECT_LOCATION, []string{input.WebsiteRedirectLocation}, isObs)
}
input.prepareStorageClass(headers, isObs)
if input.Metadata != nil {
for key, value := range input.Metadata {
key = strings.TrimSpace(key)
setHeadersNext(headers, HEADER_PREFIX_META_OBS+key, HEADER_PREFIX_META+key, []string{value}, isObs)
}
}
return
}

func (input GetObjectInput) prepareResponseParams(params map[string]string) {
if input.ResponseCacheControl != "" {
params[PARAM_RESPONSE_CACHE_CONTROL] = input.ResponseCacheControl
}
if input.ResponseContentDisposition != "" {
params[PARAM_RESPONSE_CONTENT_DISPOSITION] = input.ResponseContentDisposition
}
if input.ResponseContentEncoding != "" {
params[PARAM_RESPONSE_CONTENT_ENCODING] = input.ResponseContentEncoding
}
if input.ResponseContentLanguage != "" {
params[PARAM_RESPONSE_CONTENT_LANGUAGE] = input.ResponseContentLanguage
}
if input.ResponseContentType != "" {
params[PARAM_RESPONSE_CONTENT_TYPE] = input.ResponseContentType
}
if input.ResponseExpires != "" {
params[PARAM_RESPONSE_EXPIRES] = input.ResponseExpires
}
}

func (input GetObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
params, headers, data, err = input.GetObjectMetadataInput.trans(isObs)
if err != nil {
return
}
input.prepareResponseParams(params)
if input.ImageProcess != "" {
params[PARAM_IMAGE_PROCESS] = input.ImageProcess
}
if input.RangeStart >= 0 && input.RangeEnd > input.RangeStart {
headers[HEADER_RANGE] = []string{fmt.Sprintf("bytes=%d-%d", input.RangeStart, input.RangeEnd)}
}

if input.IfMatch != "" {
headers[HEADER_IF_MATCH] = []string{input.IfMatch}
}
if input.IfNoneMatch != "" {
headers[HEADER_IF_NONE_MATCH] = []string{input.IfNoneMatch}
}
if !input.IfModifiedSince.IsZero() {
headers[HEADER_IF_MODIFIED_SINCE] = []string{FormatUtcToRfc1123(input.IfModifiedSince)}
}
if !input.IfUnmodifiedSince.IsZero() {
headers[HEADER_IF_UNMODIFIED_SINCE] = []string{FormatUtcToRfc1123(input.IfUnmodifiedSince)}
}
return
}

func (input ObjectOperationInput) prepareGrantHeaders(headers map[string][]string) {
if GrantReadID := input.GrantReadId; GrantReadID != "" {
setHeaders(headers, HEADER_GRANT_READ_OBS, []string{GrantReadID}, true)
}
if GrantReadAcpID := input.GrantReadAcpId; GrantReadAcpID != "" {
setHeaders(headers, HEADER_GRANT_READ_ACP_OBS, []string{GrantReadAcpID}, true)
}
if GrantWriteAcpID := input.GrantWriteAcpId; GrantWriteAcpID != "" {
setHeaders(headers, HEADER_GRANT_WRITE_ACP_OBS, []string{GrantWriteAcpID}, true)
}
if GrantFullControlID := input.GrantFullControlId; GrantFullControlID != "" {
setHeaders(headers, HEADER_GRANT_FULL_CONTROL_OBS, []string{GrantFullControlID}, true)
}
}

func (input ObjectOperationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
headers = make(map[string][]string)
params = make(map[string]string)
if acl := string(input.ACL); acl != "" {
setHeaders(headers, HEADER_ACL, []string{acl}, isObs)
}
input.prepareGrantHeaders(headers)
if storageClass := string(input.StorageClass); storageClass != "" {
if !isObs {
if storageClass == string(StorageClassWarm) {
storageClass = string(storageClassStandardIA)
} else if storageClass == string(StorageClassCold) {
storageClass = string(storageClassGlacier)
}
}
setHeaders(headers, HEADER_STORAGE_CLASS2, []string{storageClass}, isObs)
}
if input.WebsiteRedirectLocation != "" {
setHeaders(headers, HEADER_WEBSITE_REDIRECT_LOCATION, []string{input.WebsiteRedirectLocation}, isObs)
}
setSseHeader(headers, input.SseHeader, false, isObs)
if input.Expires != 0 {
setHeaders(headers, HEADER_EXPIRES, []string{Int64ToString(input.Expires)}, true)
}
if input.Metadata != nil {
for key, value := range input.Metadata {
key = strings.TrimSpace(key)
setHeadersNext(headers, HEADER_PREFIX_META_OBS+key, HEADER_PREFIX_META+key, []string{value}, isObs)
}
}
return
}

func (input PutObjectBasicInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
params, headers, data, err = input.ObjectOperationInput.trans(isObs)
if err != nil {
return
}

if input.ContentMD5 != "" {
headers[HEADER_MD5_CAMEL] = []string{input.ContentMD5}
}

if input.ContentLength > 0 {
headers[HEADER_CONTENT_LENGTH_CAMEL] = []string{Int64ToString(input.ContentLength)}
}
if input.ContentType != "" {
headers[HEADER_CONTENT_TYPE_CAML] = []string{input.ContentType}
}

return
}

func (input PutObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
params, headers, data, err = input.PutObjectBasicInput.trans(isObs)
if err != nil {
return
}
if input.Body != nil {
data = input.Body
}
return
}

func (input CopyObjectInput) prepareReplaceHeaders(headers map[string][]string) {
if input.CacheControl != "" {
headers[HEADER_CACHE_CONTROL] = []string{input.CacheControl}
}
if input.ContentDisposition != "" {
headers[HEADER_CONTENT_DISPOSITION] = []string{input.ContentDisposition}
}
if input.ContentEncoding != "" {
headers[HEADER_CONTENT_ENCODING] = []string{input.ContentEncoding}
}
if input.ContentLanguage != "" {
headers[HEADER_CONTENT_LANGUAGE] = []string{input.ContentLanguage}
}
if input.ContentType != "" {
headers[HEADER_CONTENT_TYPE] = []string{input.ContentType}
}
if input.Expires != "" {
headers[HEADER_EXPIRES] = []string{input.Expires}
}
}

func (input CopyObjectInput) prepareCopySourceHeaders(headers map[string][]string, isObs bool) {
if input.CopySourceIfMatch != "" {
setHeaders(headers, HEADER_COPY_SOURCE_IF_MATCH, []string{input.CopySourceIfMatch}, isObs)
}
if input.CopySourceIfNoneMatch != "" {
setHeaders(headers, HEADER_COPY_SOURCE_IF_NONE_MATCH, []string{input.CopySourceIfNoneMatch}, isObs)
}
if !input.CopySourceIfModifiedSince.IsZero() {
setHeaders(headers, HEADER_COPY_SOURCE_IF_MODIFIED_SINCE, []string{FormatUtcToRfc1123(input.CopySourceIfModifiedSince)}, isObs)
}
if !input.CopySourceIfUnmodifiedSince.IsZero() {
setHeaders(headers, HEADER_COPY_SOURCE_IF_UNMODIFIED_SINCE, []string{FormatUtcToRfc1123(input.CopySourceIfUnmodifiedSince)}, isObs)
}
}

func (input CopyObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
params, headers, data, err = input.ObjectOperationInput.trans(isObs)
if err != nil {
return
}

var copySource string
if input.CopySourceVersionId != "" {
copySource = fmt.Sprintf("%s/%s?versionId=%s", input.CopySourceBucket, UrlEncode(input.CopySourceKey, false), input.CopySourceVersionId)
} else {
copySource = fmt.Sprintf("%s/%s", input.CopySourceBucket, UrlEncode(input.CopySourceKey, false))
}
setHeaders(headers, HEADER_COPY_SOURCE, []string{copySource}, isObs)

if directive := string(input.MetadataDirective); directive != "" {
setHeaders(headers, HEADER_METADATA_DIRECTIVE, []string{directive}, isObs)
}

if input.MetadataDirective == ReplaceMetadata {
input.prepareReplaceHeaders(headers)
}

input.prepareCopySourceHeaders(headers, isObs)
if input.SourceSseHeader != nil {
if sseCHeader, ok := input.SourceSseHeader.(SseCHeader); ok {
setHeaders(headers, HEADER_SSEC_COPY_SOURCE_ENCRYPTION, []string{sseCHeader.GetEncryption()}, isObs)
setHeaders(headers, HEADER_SSEC_COPY_SOURCE_KEY, []string{sseCHeader.GetKey()}, isObs)
setHeaders(headers, HEADER_SSEC_COPY_SOURCE_KEY_MD5, []string{sseCHeader.GetKeyMD5()}, isObs)
}
}
if input.SuccessActionRedirect != "" {
headers[HEADER_SUCCESS_ACTION_REDIRECT] = []string{input.SuccessActionRedirect}
}
return
}

func (input AbortMultipartUploadInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
params = map[string]string{"uploadId": input.UploadId}
return
}

func (input InitiateMultipartUploadInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
params, headers, data, err = input.ObjectOperationInput.trans(isObs)
if err != nil {
return
}
if input.ContentType != "" {
headers[HEADER_CONTENT_TYPE_CAML] = []string{input.ContentType}
}
params[string(SubResourceUploads)] = ""
return
}

func (input UploadPartInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
params = map[string]string{"uploadId": input.UploadId, "partNumber": IntToString(input.PartNumber)}
headers = make(map[string][]string)
setSseHeader(headers, input.SseHeader, true, isObs)
if input.ContentMD5 != "" {
headers[HEADER_MD5_CAMEL] = []string{input.ContentMD5}
}
if input.Body != nil {
data = input.Body
}
return
}

func (input CompleteMultipartUploadInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
params = map[string]string{"uploadId": input.UploadId}
data, _ = ConvertCompleteMultipartUploadInputToXml(input, false)
return
}

func (input ListPartsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
params = map[string]string{"uploadId": input.UploadId}
if input.MaxParts > 0 {
params["max-parts"] = IntToString(input.MaxParts)
}
if input.PartNumberMarker > 0 {
params["part-number-marker"] = IntToString(input.PartNumberMarker)
}
return
}

func (input CopyPartInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
params = map[string]string{"uploadId": input.UploadId, "partNumber": IntToString(input.PartNumber)}
headers = make(map[string][]string, 1)
var copySource string
if input.CopySourceVersionId != "" {
copySource = fmt.Sprintf("%s/%s?versionId=%s", input.CopySourceBucket, UrlEncode(input.CopySourceKey, false), input.CopySourceVersionId)
} else {
copySource = fmt.Sprintf("%s/%s", input.CopySourceBucket, UrlEncode(input.CopySourceKey, false))
}
setHeaders(headers, HEADER_COPY_SOURCE, []string{copySource}, isObs)
if input.CopySourceRangeStart >= 0 && input.CopySourceRangeEnd > input.CopySourceRangeStart {
setHeaders(headers, HEADER_COPY_SOURCE_RANGE, []string{fmt.Sprintf("bytes=%d-%d", input.CopySourceRangeStart, input.CopySourceRangeEnd)}, isObs)
}

setSseHeader(headers, input.SseHeader, true, isObs)
if input.SourceSseHeader != nil {
if sseCHeader, ok := input.SourceSseHeader.(SseCHeader); ok {
setHeaders(headers, HEADER_SSEC_COPY_SOURCE_ENCRYPTION, []string{sseCHeader.GetEncryption()}, isObs)
setHeaders(headers, HEADER_SSEC_COPY_SOURCE_KEY, []string{sseCHeader.GetKey()}, isObs)
setHeaders(headers, HEADER_SSEC_COPY_SOURCE_KEY_MD5, []string{sseCHeader.GetKeyMD5()}, isObs)
}
}
return
}

func (input HeadObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
params = make(map[string]string)
if input.VersionId != "" {
params[PARAM_VERSION_ID] = input.VersionId
}
return
}

func (input SetBucketRequestPaymentInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
return trans(SubResourceRequestPayment, input)
}

type partSlice []Part

func (parts partSlice) Len() int {
return len(parts)
}

func (parts partSlice) Less(i, j int) bool {
return parts[i].PartNumber < parts[j].PartNumber
}

func (parts partSlice) Swap(i, j int) {
parts[i], parts[j] = parts[j], parts[i]
}
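// partSlice implements sort.Interface so uploaded parts can be ordered by
// part number before completing a multipart upload, e.g.:
//
//  sort.Sort(partSlice(parts)) // parts []Part, ascending by PartNumber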

type readerWrapper struct {
reader io.Reader
mark int64
totalCount int64
readedCount int64
}

func (rw *readerWrapper) seek(offset int64, whence int) (int64, error) {
if r, ok := rw.reader.(*strings.Reader); ok {
return r.Seek(offset, whence)
} else if r, ok := rw.reader.(*bytes.Reader); ok {
return r.Seek(offset, whence)
} else if r, ok := rw.reader.(*os.File); ok {
return r.Seek(offset, whence)
}
return offset, nil
}

func (rw *readerWrapper) Read(p []byte) (n int, err error) {
if rw.totalCount == 0 {
return 0, io.EOF
}
if rw.totalCount > 0 {
n, err = rw.reader.Read(p)
readedOnce := int64(n)
remainCount := rw.totalCount - rw.readedCount
if remainCount > readedOnce {
rw.readedCount += readedOnce
return n, err
}
rw.readedCount += remainCount
return int(remainCount), io.EOF
}
return rw.reader.Read(p)
}
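// In short, readerWrapper caps reads at totalCount bytes: totalCount == 0
// yields immediate EOF, a negative value means unlimited. Sketch:
//
//  rw := &readerWrapper{reader: strings.NewReader("abcdef"), totalCount: 4}
//  b, _ := ioutil.ReadAll(rw) // b == []byte("abcd")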

type fileReaderWrapper struct {
readerWrapper
filePath string
}

func (input SetBucketFetchPolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
contentType, _ := mimeTypes["json"]
headers = make(map[string][]string, 2)
headers[HEADER_CONTENT_TYPE] = []string{contentType}
setHeaders(headers, headerOefMarker, []string{"yes"}, isObs)
data, err = convertFetchPolicyToJSON(input)
return
}

func (input GetBucketFetchPolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
headers = make(map[string][]string, 1)
setHeaders(headers, headerOefMarker, []string{"yes"}, isObs)
return
}

func (input DeleteBucketFetchPolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
headers = make(map[string][]string, 1)
setHeaders(headers, headerOefMarker, []string{"yes"}, isObs)
return
}

func (input SetBucketFetchJobInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
contentType, _ := mimeTypes["json"]
headers = make(map[string][]string, 2)
headers[HEADER_CONTENT_TYPE] = []string{contentType}
setHeaders(headers, headerOefMarker, []string{"yes"}, isObs)
data, err = convertFetchJobToJSON(input)
return
}

func (input GetBucketFetchJobInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
headers = make(map[string][]string, 1)
setHeaders(headers, headerOefMarker, []string{"yes"}, isObs)
return
}

+ 873
- 0
modules/obs/transfer.go View File

@@ -0,0 +1,873 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

//nolint:golint, unused
package obs

import (
"bufio"
"encoding/xml"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sync"
"sync/atomic"
"syscall"
)

var errAbort = errors.New("AbortError")

// FileStatus defines the upload file properties
type FileStatus struct {
XMLName xml.Name `xml:"FileInfo"`
LastModified int64 `xml:"LastModified"`
Size int64 `xml:"Size"`
}

// UploadPartInfo defines the upload part properties
type UploadPartInfo struct {
XMLName xml.Name `xml:"UploadPart"`
PartNumber int `xml:"PartNumber"`
Etag string `xml:"Etag"`
PartSize int64 `xml:"PartSize"`
Offset int64 `xml:"Offset"`
IsCompleted bool `xml:"IsCompleted"`
}

// UploadCheckpoint defines the upload checkpoint file properties
type UploadCheckpoint struct {
XMLName xml.Name `xml:"UploadFileCheckpoint"`
Bucket string `xml:"Bucket"`
Key string `xml:"Key"`
UploadId string `xml:"UploadId,omitempty"`
UploadFile string `xml:"FileUrl"`
FileInfo FileStatus `xml:"FileInfo"`
UploadParts []UploadPartInfo `xml:"UploadParts>UploadPart"`
}
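// A checkpoint file written by updateCheckpointFile is plain XML; an
// illustrative (not literal) example for a two-part 8 MiB upload:
//
//   <UploadFileCheckpoint>
//     <Bucket>testopendata</Bucket>
//     <Key>attachment/demo.bin</Key>
//     <UploadId>exampleUploadId</UploadId>
//     <FileUrl>/tmp/demo.bin</FileUrl>
//     <FileInfo><LastModified>1612345678</LastModified><Size>8388608</Size></FileInfo>
//     <UploadParts>
//       <UploadPart><PartNumber>1</PartNumber><Etag>etag1</Etag><PartSize>5242880</PartSize><Offset>0</Offset><IsCompleted>true</IsCompleted></UploadPart>
//       <UploadPart><PartNumber>2</PartNumber><Etag>etag2</Etag><PartSize>3145728</PartSize><Offset>5242880</Offset><IsCompleted>false</IsCompleted></UploadPart>
//     </UploadParts>
//   </UploadFileCheckpoint>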

func (ufc *UploadCheckpoint) isValid(bucket, key, uploadFile string, fileStat os.FileInfo) bool {
if ufc.Bucket != bucket || ufc.Key != key || ufc.UploadFile != uploadFile {
doLog(LEVEL_INFO, "Checkpoint file is invalid, the bucketName or objectKey or uploadFile was changed. clear the record.")
return false
}

if ufc.FileInfo.Size != fileStat.Size() || ufc.FileInfo.LastModified != fileStat.ModTime().Unix() {
doLog(LEVEL_INFO, "Checkpoint file is invalid, the uploadFile was changed. clear the record.")
return false
}

if ufc.UploadId == "" {
doLog(LEVEL_INFO, "UploadId is invalid. clear the record.")
return false
}

return true
}

type uploadPartTask struct {
UploadPartInput
obsClient *ObsClient
abort *int32
extensions []extensionOptions
enableCheckpoint bool
}

func (task *uploadPartTask) Run() interface{} {
if atomic.LoadInt32(task.abort) == 1 {
return errAbort
}

input := &UploadPartInput{}
input.Bucket = task.Bucket
input.Key = task.Key
input.PartNumber = task.PartNumber
input.UploadId = task.UploadId
input.SseHeader = task.SseHeader
input.SourceFile = task.SourceFile
input.Offset = task.Offset
input.PartSize = task.PartSize
extensions := task.extensions

var output *UploadPartOutput
var err error
if extensions != nil {
output, err = task.obsClient.UploadPart(input, extensions...)
} else {
output, err = task.obsClient.UploadPart(input)
}

if err == nil {
if output.ETag == "" {
doLog(LEVEL_WARN, "Get invalid etag value after uploading part [%d].", task.PartNumber)
if !task.enableCheckpoint {
atomic.CompareAndSwapInt32(task.abort, 0, 1)
doLog(LEVEL_WARN, "Task is aborted, part number is [%d]", task.PartNumber)
}
return fmt.Errorf("get invalid etag value after uploading part [%d]", task.PartNumber)
}
return output
} else if obsError, ok := err.(ObsError); ok && obsError.StatusCode >= 400 && obsError.StatusCode < 500 {
atomic.CompareAndSwapInt32(task.abort, 0, 1)
doLog(LEVEL_WARN, "Task is aborted, part number is [%d]", task.PartNumber)
}
return err
}

func loadCheckpointFile(checkpointFile string, result interface{}) error {
ret, err := ioutil.ReadFile(checkpointFile)
if err != nil {
return err
}
if len(ret) == 0 {
return nil
}
return xml.Unmarshal(ret, result)
}

func updateCheckpointFile(fc interface{}, checkpointFilePath string) error {
result, err := xml.Marshal(fc)
if err != nil {
return err
}
err = ioutil.WriteFile(checkpointFilePath, result, 0666)
return err
}

func getCheckpointFile(ufc *UploadCheckpoint, uploadFileStat os.FileInfo, input *UploadFileInput, obsClient *ObsClient, extensions []extensionOptions) (needCheckpoint bool, err error) {
checkpointFilePath := input.CheckpointFile
checkpointFileStat, err := os.Stat(checkpointFilePath)
if err != nil {
doLog(LEVEL_DEBUG, fmt.Sprintf("Stat checkpoint file failed with error: [%v].", err))
return true, nil
}
if checkpointFileStat.IsDir() {
doLog(LEVEL_ERROR, "Checkpoint file can not be a folder.")
return false, errors.New("checkpoint file can not be a folder")
}
err = loadCheckpointFile(checkpointFilePath, ufc)
if err != nil {
doLog(LEVEL_WARN, fmt.Sprintf("Load checkpoint file failed with error: [%v].", err))
return true, nil
} else if !ufc.isValid(input.Bucket, input.Key, input.UploadFile, uploadFileStat) {
if ufc.Bucket != "" && ufc.Key != "" && ufc.UploadId != "" {
_err := abortTask(ufc.Bucket, ufc.Key, ufc.UploadId, obsClient, extensions)
if _err != nil {
doLog(LEVEL_WARN, "Failed to abort upload task [%s].", ufc.UploadId)
}
}
_err := os.Remove(checkpointFilePath)
if _err != nil {
doLog(LEVEL_WARN, fmt.Sprintf("Failed to remove checkpoint file with error: [%v].", _err))
}
} else {
return false, nil
}

return true, nil
}

func prepareUpload(ufc *UploadCheckpoint, uploadFileStat os.FileInfo, input *UploadFileInput, obsClient *ObsClient, extensions []extensionOptions) error {
initiateInput := &InitiateMultipartUploadInput{}
initiateInput.ObjectOperationInput = input.ObjectOperationInput
initiateInput.ContentType = input.ContentType
var output *InitiateMultipartUploadOutput
var err error
if extensions != nil {
output, err = obsClient.InitiateMultipartUpload(initiateInput, extensions...)
} else {
output, err = obsClient.InitiateMultipartUpload(initiateInput)
}
if err != nil {
return err
}

ufc.Bucket = input.Bucket
ufc.Key = input.Key
ufc.UploadFile = input.UploadFile
ufc.FileInfo = FileStatus{}
ufc.FileInfo.Size = uploadFileStat.Size()
ufc.FileInfo.LastModified = uploadFileStat.ModTime().Unix()
ufc.UploadId = output.UploadId

err = sliceFile(input.PartSize, ufc)
return err
}

func sliceFile(partSize int64, ufc *UploadCheckpoint) error {
fileSize := ufc.FileInfo.Size
cnt := fileSize / partSize
if cnt >= 10000 {
partSize = fileSize / 10000
if fileSize%10000 != 0 {
partSize++
}
cnt = fileSize / partSize
}
if fileSize%partSize != 0 {
cnt++
}

if partSize > MAX_PART_SIZE {
doLog(LEVEL_ERROR, "The source upload file is too large")
return fmt.Errorf("The source upload file is too large")
}

if cnt == 0 {
uploadPart := UploadPartInfo{}
uploadPart.PartNumber = 1
ufc.UploadParts = []UploadPartInfo{uploadPart}
} else {
uploadParts := make([]UploadPartInfo, 0, cnt)
var i int64
for i = 0; i < cnt; i++ {
uploadPart := UploadPartInfo{}
uploadPart.PartNumber = int(i) + 1
uploadPart.PartSize = partSize
uploadPart.Offset = i * partSize
uploadParts = append(uploadParts, uploadPart)
}
if value := fileSize % partSize; value != 0 {
uploadParts[cnt-1].PartSize = value
}
ufc.UploadParts = uploadParts
}
return nil
}
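// Worked example: a 1 GiB file with a 5 MiB part size slices into 205 parts
// (204 full parts plus a final 4 MiB part). The 10000-part cap above grows
// the part size for huge files: 100 GiB at a requested 5 MiB per part is
// re-sliced into 10000 parts of roughly 10.2 MiB each.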

func abortTask(bucket, key, uploadID string, obsClient *ObsClient, extensions []extensionOptions) error {
input := &AbortMultipartUploadInput{}
input.Bucket = bucket
input.Key = key
input.UploadId = uploadID
if extensions != nil {
_, err := obsClient.AbortMultipartUpload(input, extensions...)
return err
}
_, err := obsClient.AbortMultipartUpload(input)
return err
}

func handleUploadFileResult(uploadPartError error, ufc *UploadCheckpoint, enableCheckpoint bool, obsClient *ObsClient, extensions []extensionOptions) error {
if uploadPartError != nil {
if enableCheckpoint {
return uploadPartError
}
_err := abortTask(ufc.Bucket, ufc.Key, ufc.UploadId, obsClient, extensions)
if _err != nil {
doLog(LEVEL_WARN, "Failed to abort task [%s].", ufc.UploadId)
}
return uploadPartError
}
return nil
}

func completeParts(ufc *UploadCheckpoint, enableCheckpoint bool, checkpointFilePath string, obsClient *ObsClient, extensions []extensionOptions) (output *CompleteMultipartUploadOutput, err error) {
completeInput := &CompleteMultipartUploadInput{}
completeInput.Bucket = ufc.Bucket
completeInput.Key = ufc.Key
completeInput.UploadId = ufc.UploadId
parts := make([]Part, 0, len(ufc.UploadParts))
for _, uploadPart := range ufc.UploadParts {
part := Part{}
part.PartNumber = uploadPart.PartNumber
part.ETag = uploadPart.Etag
parts = append(parts, part)
}
completeInput.Parts = parts
var completeOutput *CompleteMultipartUploadOutput
if extensions != nil {
completeOutput, err = obsClient.CompleteMultipartUpload(completeInput, extensions...)
} else {
completeOutput, err = obsClient.CompleteMultipartUpload(completeInput)
}

if err == nil {
if enableCheckpoint {
_err := os.Remove(checkpointFilePath)
if _err != nil {
doLog(LEVEL_WARN, "Upload file successfully, but remove checkpoint file failed with error [%v].", _err)
}
}
return completeOutput, err
}
if !enableCheckpoint {
_err := abortTask(ufc.Bucket, ufc.Key, ufc.UploadId, obsClient, extensions)
if _err != nil {
doLog(LEVEL_WARN, "Failed to abort task [%s].", ufc.UploadId)
}
}
return completeOutput, err
}

func (obsClient ObsClient) resumeUpload(input *UploadFileInput, extensions []extensionOptions) (output *CompleteMultipartUploadOutput, err error) {
uploadFileStat, err := os.Stat(input.UploadFile)
if err != nil {
doLog(LEVEL_ERROR, fmt.Sprintf("Failed to stat uploadFile with error: [%v].", err))
return nil, err
}
if uploadFileStat.IsDir() {
doLog(LEVEL_ERROR, "UploadFile can not be a folder.")
return nil, errors.New("uploadFile can not be a folder")
}

ufc := &UploadCheckpoint{}

var needCheckpoint = true
var checkpointFilePath = input.CheckpointFile
var enableCheckpoint = input.EnableCheckpoint
if enableCheckpoint {
needCheckpoint, err = getCheckpointFile(ufc, uploadFileStat, input, &obsClient, extensions)
if err != nil {
return nil, err
}
}
if needCheckpoint {
err = prepareUpload(ufc, uploadFileStat, input, &obsClient, extensions)
if err != nil {
return nil, err
}

if enableCheckpoint {
err = updateCheckpointFile(ufc, checkpointFilePath)
if err != nil {
doLog(LEVEL_ERROR, "Failed to update checkpoint file with error [%v].", err)
_err := abortTask(ufc.Bucket, ufc.Key, ufc.UploadId, &obsClient, extensions)
if _err != nil {
doLog(LEVEL_WARN, "Failed to abort task [%s].", ufc.UploadId)
}
return nil, err
}
}
}

uploadPartError := obsClient.uploadPartConcurrent(ufc, checkpointFilePath, input, extensions)
err = handleUploadFileResult(uploadPartError, ufc, enableCheckpoint, &obsClient, extensions)
if err != nil {
return nil, err
}

completeOutput, err := completeParts(ufc, enableCheckpoint, checkpointFilePath, &obsClient, extensions)

return completeOutput, err
}

func handleUploadTaskResult(result interface{}, ufc *UploadCheckpoint, partNum int, enableCheckpoint bool, checkpointFilePath string, lock *sync.Mutex) (err error) {
if uploadPartOutput, ok := result.(*UploadPartOutput); ok {
lock.Lock()
defer lock.Unlock()
ufc.UploadParts[partNum-1].Etag = uploadPartOutput.ETag
ufc.UploadParts[partNum-1].IsCompleted = true
if enableCheckpoint {
_err := updateCheckpointFile(ufc, checkpointFilePath)
if _err != nil {
doLog(LEVEL_WARN, "Failed to update checkpoint file with error [%v].", _err)
}
}
} else if result != errAbort {
if _err, ok := result.(error); ok {
err = _err
}
}
return
}

func (obsClient ObsClient) uploadPartConcurrent(ufc *UploadCheckpoint, checkpointFilePath string, input *UploadFileInput, extensions []extensionOptions) error {
pool := NewRoutinePool(input.TaskNum, MAX_PART_NUM)
var uploadPartError atomic.Value
var errFlag int32
var abort int32
lock := new(sync.Mutex)
for _, uploadPart := range ufc.UploadParts {
if atomic.LoadInt32(&abort) == 1 {
break
}
if uploadPart.IsCompleted {
continue
}
task := uploadPartTask{
UploadPartInput: UploadPartInput{
Bucket: ufc.Bucket,
Key: ufc.Key,
PartNumber: uploadPart.PartNumber,
UploadId: ufc.UploadId,
SseHeader: input.SseHeader,
SourceFile: input.UploadFile,
Offset: uploadPart.Offset,
PartSize: uploadPart.PartSize,
},
obsClient: &obsClient,
abort: &abort,
extensions: extensions,
enableCheckpoint: input.EnableCheckpoint,
}
pool.ExecuteFunc(func() interface{} {
result := task.Run()
err := handleUploadTaskResult(result, ufc, task.PartNumber, input.EnableCheckpoint, input.CheckpointFile, lock)
if err != nil && atomic.CompareAndSwapInt32(&errFlag, 0, 1) {
uploadPartError.Store(err)
}
return nil
})
}
pool.ShutDown()
if err, ok := uploadPartError.Load().(error); ok {
return err
}
return nil
}

// ObjectInfo defines download object info
type ObjectInfo struct {
XMLName xml.Name `xml:"ObjectInfo"`
LastModified int64 `xml:"LastModified"`
Size int64 `xml:"Size"`
ETag string `xml:"ETag"`
}

// TempFileInfo defines temp download file properties
type TempFileInfo struct {
XMLName xml.Name `xml:"TempFileInfo"`
TempFileUrl string `xml:"TempFileUrl"`
Size int64 `xml:"Size"`
}

// DownloadPartInfo defines download part properties
type DownloadPartInfo struct {
XMLName xml.Name `xml:"DownloadPart"`
PartNumber int64 `xml:"PartNumber"`
RangeEnd int64 `xml:"RangeEnd"`
Offset int64 `xml:"Offset"`
IsCompleted bool `xml:"IsCompleted"`
}

// DownloadCheckpoint defines download checkpoint file properties
type DownloadCheckpoint struct {
XMLName xml.Name `xml:"DownloadFileCheckpoint"`
Bucket string `xml:"Bucket"`
Key string `xml:"Key"`
VersionId string `xml:"VersionId,omitempty"`
DownloadFile string `xml:"FileUrl"`
ObjectInfo ObjectInfo `xml:"ObjectInfo"`
TempFileInfo TempFileInfo `xml:"TempFileInfo"`
DownloadParts []DownloadPartInfo `xml:"DownloadParts>DownloadPart"`
}

func (dfc *DownloadCheckpoint) isValid(input *DownloadFileInput, output *GetObjectMetadataOutput) bool {
if dfc.Bucket != input.Bucket || dfc.Key != input.Key || dfc.VersionId != input.VersionId || dfc.DownloadFile != input.DownloadFile {
doLog(LEVEL_INFO, "Checkpoint file is invalid, the bucketName or objectKey or downloadFile was changed. clear the record.")
return false
}
if dfc.ObjectInfo.LastModified != output.LastModified.Unix() || dfc.ObjectInfo.ETag != output.ETag || dfc.ObjectInfo.Size != output.ContentLength {
doLog(LEVEL_INFO, "Checkpoint file is invalid, the object info was changed. clear the record.")
return false
}
if dfc.TempFileInfo.Size != output.ContentLength {
doLog(LEVEL_INFO, "Checkpoint file is invalid, size was changed. clear the record.")
return false
}
stat, err := os.Stat(dfc.TempFileInfo.TempFileUrl)
if err != nil || stat.Size() != dfc.ObjectInfo.Size {
doLog(LEVEL_INFO, "Checkpoint file is invalid, the temp download file was changed. clear the record.")
return false
}

return true
}

type downloadPartTask struct {
GetObjectInput
obsClient *ObsClient
extensions []extensionOptions
abort *int32
partNumber int64
tempFileURL string
enableCheckpoint bool
}

func (task *downloadPartTask) Run() interface{} {
if atomic.LoadInt32(task.abort) == 1 {
return errAbort
}
getObjectInput := &GetObjectInput{}
getObjectInput.GetObjectMetadataInput = task.GetObjectMetadataInput
getObjectInput.IfMatch = task.IfMatch
getObjectInput.IfNoneMatch = task.IfNoneMatch
getObjectInput.IfModifiedSince = task.IfModifiedSince
getObjectInput.IfUnmodifiedSince = task.IfUnmodifiedSince
getObjectInput.RangeStart = task.RangeStart
getObjectInput.RangeEnd = task.RangeEnd

var output *GetObjectOutput
var err error
if task.extensions != nil {
output, err = task.obsClient.GetObject(getObjectInput, task.extensions...)
} else {
output, err = task.obsClient.GetObject(getObjectInput)
}

if err == nil {
defer func() {
errMsg := output.Body.Close()
if errMsg != nil {
doLog(LEVEL_WARN, "Failed to close response body.")
}
}()
_err := updateDownloadFile(task.tempFileURL, task.RangeStart, output)
if _err != nil {
if !task.enableCheckpoint {
atomic.CompareAndSwapInt32(task.abort, 0, 1)
doLog(LEVEL_WARN, "Task is aborted, part number is [%d]", task.partNumber)
}
return _err
}
return output
} else if obsError, ok := err.(ObsError); ok && obsError.StatusCode >= 400 && obsError.StatusCode < 500 {
atomic.CompareAndSwapInt32(task.abort, 0, 1)
doLog(LEVEL_WARN, "Task is aborted, part number is [%d]", task.partNumber)
}
return err
}

func getObjectInfo(input *DownloadFileInput, obsClient *ObsClient, extensions []extensionOptions) (getObjectmetaOutput *GetObjectMetadataOutput, err error) {
if extensions != nil {
getObjectmetaOutput, err = obsClient.GetObjectMetadata(&input.GetObjectMetadataInput, extensions...)
} else {
getObjectmetaOutput, err = obsClient.GetObjectMetadata(&input.GetObjectMetadataInput)
}

return
}

func getDownloadCheckpointFile(dfc *DownloadCheckpoint, input *DownloadFileInput, output *GetObjectMetadataOutput) (needCheckpoint bool, err error) {
checkpointFilePath := input.CheckpointFile
checkpointFileStat, err := os.Stat(checkpointFilePath)
if err != nil {
doLog(LEVEL_DEBUG, fmt.Sprintf("Stat checkpoint file failed with error: [%v].", err))
return true, nil
}
if checkpointFileStat.IsDir() {
doLog(LEVEL_ERROR, "Checkpoint file can not be a folder.")
return false, errors.New("checkpoint file can not be a folder")
}
err = loadCheckpointFile(checkpointFilePath, dfc)
if err != nil {
doLog(LEVEL_WARN, fmt.Sprintf("Load checkpoint file failed with error: [%v].", err))
return true, nil
} else if !dfc.isValid(input, output) {
if dfc.TempFileInfo.TempFileUrl != "" {
_err := os.Remove(dfc.TempFileInfo.TempFileUrl)
if _err != nil {
doLog(LEVEL_WARN, "Failed to remove temp download file with error [%v].", _err)
}
}
_err := os.Remove(checkpointFilePath)
if _err != nil {
doLog(LEVEL_WARN, "Failed to remove checkpoint file with error [%v].", _err)
}
} else {
return false, nil
}

return true, nil
}

func sliceObject(objectSize, partSize int64, dfc *DownloadCheckpoint) {
cnt := objectSize / partSize
if objectSize%partSize > 0 {
cnt++
}

if cnt == 0 {
downloadPart := DownloadPartInfo{}
downloadPart.PartNumber = 1
dfc.DownloadParts = []DownloadPartInfo{downloadPart}
} else {
downloadParts := make([]DownloadPartInfo, 0, cnt)
var i int64
for i = 0; i < cnt; i++ {
downloadPart := DownloadPartInfo{}
downloadPart.PartNumber = i + 1
downloadPart.Offset = i * partSize
downloadPart.RangeEnd = (i+1)*partSize - 1
downloadParts = append(downloadParts, downloadPart)
}
dfc.DownloadParts = downloadParts
if value := objectSize % partSize; value > 0 {
dfc.DownloadParts[cnt-1].RangeEnd = dfc.ObjectInfo.Size - 1
}
}
}
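// Example: a 10 MiB object with a 4 MiB part size becomes three parts
// covering byte ranges 0-4194303, 4194304-8388607 and 8388608-10485759.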

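// createFile preallocates the temp download file at its final size via
// syscall.Ftruncate, giving concurrent part writers a file they can seek
// into immediately.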
func createFile(tempFileURL string, fileSize int64) error {
fd, err := syscall.Open(tempFileURL, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
doLog(LEVEL_WARN, "Failed to open temp download file [%s].", tempFileURL)
return err
}
defer func() {
errMsg := syscall.Close(fd)
if errMsg != nil {
doLog(LEVEL_WARN, "Failed to close file with error [%v].", errMsg)
}
}()
err = syscall.Ftruncate(fd, fileSize)
if err != nil {
doLog(LEVEL_WARN, "Failed to create file with error [%v].", err)
}
return err
}

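// prepareTempFile ensures the parent directory exists, then preallocates the
// temp file; if the syscall-based createFile fails, it falls back to
// extending the file by writing a single byte at offset fileSize-1.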
func prepareTempFile(tempFileURL string, fileSize int64) error {
parentDir := filepath.Dir(tempFileURL)
stat, err := os.Stat(parentDir)
if err != nil {
doLog(LEVEL_DEBUG, "Failed to stat path with error [%v].", err)
_err := os.MkdirAll(parentDir, os.ModePerm)
if _err != nil {
doLog(LEVEL_ERROR, "Failed to make dir with error [%v].", _err)
return _err
}
} else if !stat.IsDir() {
doLog(LEVEL_ERROR, "Cannot create folder [%s] due to a same file exists.", parentDir)
return fmt.Errorf("cannot create folder [%s] due to a same file exists", parentDir)
}

err = createFile(tempFileURL, fileSize)
if err == nil {
return nil
}
fd, err := os.OpenFile(tempFileURL, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
doLog(LEVEL_ERROR, "Failed to open temp download file [%s].", tempFileURL)
return err
}
defer func() {
errMsg := fd.Close()
if errMsg != nil {
doLog(LEVEL_WARN, "Failed to close file with error [%v].", errMsg)
}
}()
if fileSize > 0 {
_, err = fd.WriteAt([]byte("a"), fileSize-1)
if err != nil {
doLog(LEVEL_ERROR, "Failed to create temp download file with error [%v].", err)
return err
}
}

return nil
}

func handleDownloadFileResult(tempFileURL string, enableCheckpoint bool, downloadFileError error) error {
if downloadFileError != nil {
if !enableCheckpoint {
_err := os.Remove(tempFileURL)
if _err != nil {
doLog(LEVEL_WARN, "Failed to remove temp download file with error [%v].", _err)
}
}
return downloadFileError
}
return nil
}

func (obsClient ObsClient) resumeDownload(input *DownloadFileInput, extensions []extensionOptions) (output *GetObjectMetadataOutput, err error) {
getObjectmetaOutput, err := getObjectInfo(input, &obsClient, extensions)
if err != nil {
return nil, err
}

objectSize := getObjectmetaOutput.ContentLength
partSize := input.PartSize
dfc := &DownloadCheckpoint{}

var needCheckpoint = true
var checkpointFilePath = input.CheckpointFile
var enableCheckpoint = input.EnableCheckpoint
if enableCheckpoint {
needCheckpoint, err = getDownloadCheckpointFile(dfc, input, getObjectmetaOutput)
if err != nil {
return nil, err
}
}

if needCheckpoint {
dfc.Bucket = input.Bucket
dfc.Key = input.Key
dfc.VersionId = input.VersionId
dfc.DownloadFile = input.DownloadFile
dfc.ObjectInfo = ObjectInfo{}
dfc.ObjectInfo.LastModified = getObjectmetaOutput.LastModified.Unix()
dfc.ObjectInfo.Size = getObjectmetaOutput.ContentLength
dfc.ObjectInfo.ETag = getObjectmetaOutput.ETag
dfc.TempFileInfo = TempFileInfo{}
dfc.TempFileInfo.TempFileUrl = input.DownloadFile + ".tmp"
dfc.TempFileInfo.Size = getObjectmetaOutput.ContentLength

sliceObject(objectSize, partSize, dfc)
_err := prepareTempFile(dfc.TempFileInfo.TempFileUrl, dfc.TempFileInfo.Size)
if _err != nil {
return nil, _err
}

if enableCheckpoint {
_err := updateCheckpointFile(dfc, checkpointFilePath)
if _err != nil {
doLog(LEVEL_ERROR, "Failed to update checkpoint file with error [%v].", _err)
_errMsg := os.Remove(dfc.TempFileInfo.TempFileUrl)
if _errMsg != nil {
doLog(LEVEL_WARN, "Failed to remove temp download file with error [%v].", _errMsg)
}
return nil, _err
}
}
}

downloadFileError := obsClient.downloadFileConcurrent(input, dfc, extensions)
err = handleDownloadFileResult(dfc.TempFileInfo.TempFileUrl, enableCheckpoint, downloadFileError)
if err != nil {
return nil, err
}

err = os.Rename(dfc.TempFileInfo.TempFileUrl, input.DownloadFile)
if err != nil {
doLog(LEVEL_ERROR, "Failed to rename temp download file [%s] to download file [%s] with error [%v].", dfc.TempFileInfo.TempFileUrl, input.DownloadFile, err)
return nil, err
}
if enableCheckpoint {
err = os.Remove(checkpointFilePath)
if err != nil {
doLog(LEVEL_WARN, "Download file successfully, but remove checkpoint file failed with error [%v].", err)
}
}

return getObjectmetaOutput, nil
}

func updateDownloadFile(filePath string, rangeStart int64, output *GetObjectOutput) error {
fd, err := os.OpenFile(filePath, os.O_WRONLY, 0666)
if err != nil {
doLog(LEVEL_ERROR, "Failed to open file [%s].", filePath)
return err
}
defer func() {
errMsg := fd.Close()
if errMsg != nil {
doLog(LEVEL_WARN, "Failed to close file with error [%v].", errMsg)
}
}()
_, err = fd.Seek(rangeStart, 0)
if err != nil {
doLog(LEVEL_ERROR, "Failed to seek file with error [%v].", err)
return err
}
fileWriter := bufio.NewWriterSize(fd, 65536)
part := make([]byte, 8192)
var readErr error
var readCount int
for {
readCount, readErr = output.Body.Read(part)
if readCount > 0 {
wcnt, werr := fileWriter.Write(part[0:readCount])
if werr != nil {
doLog(LEVEL_ERROR, "Failed to write to file with error [%v].", werr)
return werr
}
if wcnt != readCount {
doLog(LEVEL_ERROR, "Failed to write to file [%s], expect: [%d], actual: [%d]", filePath, readCount, wcnt)
return fmt.Errorf("Failed to write to file [%s], expect: [%d], actual: [%d]", filePath, readCount, wcnt)
}
}
if readErr != nil {
if readErr != io.EOF {
doLog(LEVEL_ERROR, "Failed to read response body with error [%v].", readErr)
return readErr
}
break
}
}
err = fileWriter.Flush()
if err != nil {
doLog(LEVEL_ERROR, "Failed to flush file with error [%v].", err)
return err
}
return nil
}

func handleDownloadTaskResult(result interface{}, dfc *DownloadCheckpoint, partNum int64, enableCheckpoint bool, checkpointFile string, lock *sync.Mutex) (err error) {
if _, ok := result.(*GetObjectOutput); ok {
lock.Lock()
defer lock.Unlock()
dfc.DownloadParts[partNum-1].IsCompleted = true
if enableCheckpoint {
_err := updateCheckpointFile(dfc, checkpointFile)
if _err != nil {
doLog(LEVEL_WARN, "Failed to update checkpoint file with error [%v].", _err)
}
}
} else if result != errAbort {
if _err, ok := result.(error); ok {
err = _err
}
}
return
}

func (obsClient ObsClient) downloadFileConcurrent(input *DownloadFileInput, dfc *DownloadCheckpoint, extensions []extensionOptions) error {
pool := NewRoutinePool(input.TaskNum, MAX_PART_NUM)
var downloadPartError atomic.Value
var errFlag int32
var abort int32
lock := new(sync.Mutex)
for _, downloadPart := range dfc.DownloadParts {
if atomic.LoadInt32(&abort) == 1 {
break
}
if downloadPart.IsCompleted {
continue
}
task := downloadPartTask{
GetObjectInput: GetObjectInput{
GetObjectMetadataInput: input.GetObjectMetadataInput,
IfMatch: input.IfMatch,
IfNoneMatch: input.IfNoneMatch,
IfUnmodifiedSince: input.IfUnmodifiedSince,
IfModifiedSince: input.IfModifiedSince,
RangeStart: downloadPart.Offset,
RangeEnd: downloadPart.RangeEnd,
},
obsClient: &obsClient,
extensions: extensions,
abort: &abort,
partNumber: downloadPart.PartNumber,
tempFileURL: dfc.TempFileInfo.TempFileUrl,
enableCheckpoint: input.EnableCheckpoint,
}
pool.ExecuteFunc(func() interface{} {
result := task.Run()
err := handleDownloadTaskResult(result, dfc, task.partNumber, input.EnableCheckpoint, input.CheckpointFile, lock)
if err != nil && atomic.CompareAndSwapInt32(&errFlag, 0, 1) {
downloadPartError.Store(err)
}
return nil
})
}
pool.ShutDown()
if err, ok := downloadPartError.Load().(error); ok {
return err
}

return nil
}
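resumeUpload and resumeDownload are the engine behind the SDK's resumable transfers. A minimal usage sketch (illustrative only, not part of this commit — it assumes the exported UploadFile wrapper defined elsewhere in the SDK delegates to resumeUpload, and the endpoint, credentials and paths are placeholders):

	package main

	import (
		"fmt"

		"code.gitea.io/gitea/modules/obs"
	)

	func main() {
		// Placeholder endpoint and credentials; real values come from the
		// [obs] section of app.ini.
		client, err := obs.New("ak", "sk", "https://obs.example.com")
		if err != nil {
			panic(err)
		}

		input := &obs.UploadFileInput{}
		input.Bucket = "testopendata"
		input.Key = "attachment/demo.bin"
		input.UploadFile = "/tmp/demo.bin" // local source file
		input.PartSize = 5 * 1024 * 1024   // 5 MiB per part
		input.TaskNum = 4                  // concurrent part-upload goroutines
		input.EnableCheckpoint = true      // resume from the checkpoint file after a crash
		input.CheckpointFile = "/tmp/demo.bin.checkpoint"

		output, err := client.UploadFile(input)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%+v\n", output)
	}

With EnableCheckpoint set, a re-run after an interruption skips every part whose IsCompleted flag is already recorded in the checkpoint file.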

+ 536
- 0
modules/obs/util.go View File

@@ -0,0 +1,536 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

//nolint:golint, unused
package obs

import (
"crypto/hmac"
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"encoding/json"
"encoding/xml"
"fmt"
"net/url"
"regexp"
"strconv"
"strings"
"time"
)

var regex = regexp.MustCompile("^[\u4e00-\u9fa5]$")
var ipRegex = regexp.MustCompile("^((2[0-4]\\d|25[0-5]|[01]?\\d\\d?)\\.){3}(2[0-4]\\d|25[0-5]|[01]?\\d\\d?)$")
var v4AuthRegex = regexp.MustCompile("Credential=(.+?),SignedHeaders=(.+?),Signature=.+")
var regionRegex = regexp.MustCompile(".+/\\d+/(.+?)/.+")

// StringContains replaces subStr in src with subTranscoding and returns the new string
func StringContains(src string, subStr string, subTranscoding string) string {
return strings.Replace(src, subStr, subTranscoding, -1)
}

// XmlTranscoding replaces special characters with their escaped form
func XmlTranscoding(src string) string {
srcTmp := StringContains(src, "&", "&amp;")
srcTmp = StringContains(srcTmp, "<", "&lt;")
srcTmp = StringContains(srcTmp, ">", "&gt;")
srcTmp = StringContains(srcTmp, "'", "&apos;")
srcTmp = StringContains(srcTmp, "\"", "&quot;")
return srcTmp
}

// StringToInt converts string value to int value with default value
func StringToInt(value string, def int) int {
ret, err := strconv.Atoi(value)
if err != nil {
ret = def
}
return ret
}

// StringToInt64 converts string value to int64 value with default value
func StringToInt64(value string, def int64) int64 {
ret, err := strconv.ParseInt(value, 10, 64)
if err != nil {
ret = def
}
return ret
}

// IntToString converts int value to string value
func IntToString(value int) string {
return strconv.Itoa(value)
}

// Int64ToString converts int64 value to string value
func Int64ToString(value int64) string {
return strconv.FormatInt(value, 10)
}

// GetCurrentTimestamp gets unix time in milliseconds
func GetCurrentTimestamp() int64 {
return time.Now().UnixNano() / 1000000
}

// FormatUtcNow gets a textual representation of the UTC format time value
func FormatUtcNow(format string) string {
return time.Now().UTC().Format(format)
}

// FormatUtcToRfc1123 gets a textual representation of the RFC1123 format time value
func FormatUtcToRfc1123(t time.Time) string {
ret := t.UTC().Format(time.RFC1123)
return ret[:strings.LastIndex(ret, "UTC")] + "GMT"
}

// Md5 gets the md5 value of input
func Md5(value []byte) []byte {
m := md5.New()
_, err := m.Write(value)
if err != nil {
doLog(LEVEL_WARN, "MD5 failed to write")
}
return m.Sum(nil)
}

// HmacSha1 gets hmac sha1 value of input
func HmacSha1(key, value []byte) []byte {
mac := hmac.New(sha1.New, key)
_, err := mac.Write(value)
if err != nil {
doLog(LEVEL_WARN, "HmacSha1 failed to write")
}
return mac.Sum(nil)
}

// HmacSha256 gets the hmac sha256 value of input
func HmacSha256(key, value []byte) []byte {
mac := hmac.New(sha256.New, key)
_, err := mac.Write(value)
if err != nil {
doLog(LEVEL_WARN, "HmacSha256 failed to write")
}
return mac.Sum(nil)
}

// Base64Encode wrapper of base64.StdEncoding.EncodeToString
func Base64Encode(value []byte) string {
return base64.StdEncoding.EncodeToString(value)
}

// Base64Decode wrapper of base64.StdEncoding.DecodeString
func Base64Decode(value string) ([]byte, error) {
return base64.StdEncoding.DecodeString(value)
}

// HexMd5 returns the md5 value of input in hexadecimal format
func HexMd5(value []byte) string {
return Hex(Md5(value))
}

// Base64Md5 returns the md5 value of input with Base64Encode
func Base64Md5(value []byte) string {
return Base64Encode(Md5(value))
}

// Sha256Hash returns sha256 checksum
func Sha256Hash(value []byte) []byte {
hash := sha256.New()
_, err := hash.Write(value)
if err != nil {
doLog(LEVEL_WARN, "Sha256Hash failed to write")
}
return hash.Sum(nil)
}

// ParseXml wrapper of xml.Unmarshal
func ParseXml(value []byte, result interface{}) error {
if len(value) == 0 {
return nil
}
return xml.Unmarshal(value, result)
}

// parseJSON wrapper of json.Unmarshal
func parseJSON(value []byte, result interface{}) error {
if len(value) == 0 {
return nil
}
return json.Unmarshal(value, result)
}

// TransToXml wrapper of xml.Marshal
func TransToXml(value interface{}) ([]byte, error) {
if value == nil {
return []byte{}, nil
}
return xml.Marshal(value)
}

// Hex wrapper of hex.EncodeToString
func Hex(value []byte) string {
return hex.EncodeToString(value)
}

// HexSha256 returns the Sha256Hash value of input in hexadecimal format
func HexSha256(value []byte) string {
return Hex(Sha256Hash(value))
}

// UrlDecode wrapper of url.QueryUnescape
func UrlDecode(value string) (string, error) {
ret, err := url.QueryUnescape(value)
if err == nil {
return ret, nil
}
return "", err
}

// UrlDecodeWithoutError wrapper of UrlDecode
func UrlDecodeWithoutError(value string) string {
ret, err := UrlDecode(value)
if err == nil {
return ret
}
if isErrorLogEnabled() {
doLog(LEVEL_ERROR, "Url decode error")
}
return ""
}

// IsIP checks whether the value matches ip address
func IsIP(value string) bool {
return ipRegex.MatchString(value)
}

// UrlEncode encodes the input value
func UrlEncode(value string, chineseOnly bool) string {
if chineseOnly {
values := make([]string, 0, len(value))
for _, val := range value {
_value := string(val)
if regex.MatchString(_value) {
_value = url.QueryEscape(_value)
}
values = append(values, _value)
}
return strings.Join(values, "")
}
return url.QueryEscape(value)
}
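// Example: UrlEncode("数据set", true) escapes only the Chinese runes and
// yields "%E6%95%B0%E6%8D%AEset"; with chineseOnly=false the whole string
// goes through url.QueryEscape, which also escapes spaces and reserved
// characters.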

func copyHeaders(m map[string][]string) (ret map[string][]string) {
if m != nil {
ret = make(map[string][]string, len(m))
for key, values := range m {
_values := make([]string, 0, len(values))
for _, value := range values {
_values = append(_values, value)
}
ret[strings.ToLower(key)] = _values
}
} else {
ret = make(map[string][]string)
}

return
}

func parseHeaders(headers map[string][]string) (signature string, region string, signedHeaders string) {
signature = "v2"
if receivedAuthorization, ok := headers[strings.ToLower(HEADER_AUTH_CAMEL)]; ok && len(receivedAuthorization) > 0 {
if strings.HasPrefix(receivedAuthorization[0], V4_HASH_PREFIX) {
signature = "v4"
matches := v4AuthRegex.FindStringSubmatch(receivedAuthorization[0])
if len(matches) >= 3 {
region = matches[1]
regions := regionRegex.FindStringSubmatch(region)
if len(regions) >= 2 {
region = regions[1]
}
signedHeaders = matches[2]
}

} else if strings.HasPrefix(receivedAuthorization[0], V2_HASH_PREFIX) {
signature = "v2"
}
}
return
}

func getTemporaryKeys() []string {
return []string{
"Signature",
"signature",
"X-Amz-Signature",
"x-amz-signature",
}
}

func getIsObs(isTemporary bool, querys []string, headers map[string][]string) bool {
isObs := true
if isTemporary {
for _, value := range querys {
keyPrefix := strings.ToLower(value)
if strings.HasPrefix(keyPrefix, HEADER_PREFIX) {
isObs = false
} else if strings.HasPrefix(value, HEADER_ACCESSS_KEY_AMZ) {
isObs = false
}
}
} else {
for key := range headers {
keyPrefix := strings.ToLower(key)
if strings.HasPrefix(keyPrefix, HEADER_PREFIX) {
isObs = false
break
}
}
}
return isObs
}

func isPathStyle(headers map[string][]string, bucketName string) bool {
if receivedHost, ok := headers[HEADER_HOST]; ok && len(receivedHost) > 0 && !strings.HasPrefix(receivedHost[0], bucketName+".") {
return true
}
return false
}

// GetV2Authorization generates v2 (OBS-style) authorization headers for the given request
func GetV2Authorization(ak, sk, method, bucketName, objectKey, queryURL string, headers map[string][]string) (ret map[string]string) {

if strings.HasPrefix(queryURL, "?") {
queryURL = queryURL[1:]
}

method = strings.ToUpper(method)

querys := strings.Split(queryURL, "&")
querysResult := make([]string, 0)
for _, value := range querys {
if value != "=" && len(value) != 0 {
querysResult = append(querysResult, value)
}
}
params := make(map[string]string)

for _, value := range querysResult {
kv := strings.Split(value, "=")
length := len(kv)
if length == 1 {
key := UrlDecodeWithoutError(kv[0])
params[key] = ""
} else if length >= 2 {
key := UrlDecodeWithoutError(kv[0])
vals := make([]string, 0, length-1)
for i := 1; i < length; i++ {
val := UrlDecodeWithoutError(kv[i])
vals = append(vals, val)
}
params[key] = strings.Join(vals, "=")
}
}
headers = copyHeaders(headers)
pathStyle := isPathStyle(headers, bucketName)
conf := &config{securityProvider: &securityProvider{ak: ak, sk: sk},
urlHolder: &urlHolder{scheme: "https", host: "dummy", port: 443},
pathStyle: pathStyle}
conf.signature = SignatureObs
_, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false)
ret = v2Auth(ak, sk, method, canonicalizedURL, headers, true)
v2HashPrefix := OBS_HASH_PREFIX
ret[HEADER_AUTH_CAMEL] = fmt.Sprintf("%s %s:%s", v2HashPrefix, ak, ret["Signature"])
return
}

// GetAuthorization generates authorization headers for the given request, selecting v2 or v4 signing from the request parameters and headers
func GetAuthorization(ak, sk, method, bucketName, objectKey, queryURL string, headers map[string][]string) (ret map[string]string) {

if strings.HasPrefix(queryURL, "?") {
queryURL = queryURL[1:]
}

method = strings.ToUpper(method)

querys := strings.Split(queryURL, "&")
querysResult := make([]string, 0)
for _, value := range querys {
if value != "=" && len(value) != 0 {
querysResult = append(querysResult, value)
}
}
params := make(map[string]string)

for _, value := range querysResult {
kv := strings.Split(value, "=")
length := len(kv)
if length == 1 {
key := UrlDecodeWithoutError(kv[0])
params[key] = ""
} else if length >= 2 {
key := UrlDecodeWithoutError(kv[0])
vals := make([]string, 0, length-1)
for i := 1; i < length; i++ {
val := UrlDecodeWithoutError(kv[i])
vals = append(vals, val)
}
params[key] = strings.Join(vals, "=")
}
}
isTemporary := false
signature := "v2"
temporaryKeys := getTemporaryKeys()
for _, key := range temporaryKeys {
if _, ok := params[key]; ok {
isTemporary = true
if strings.ToLower(key) == "signature" {
signature = "v2"
} else if strings.ToLower(key) == "x-amz-signature" {
signature = "v4"
}
break
}
}
isObs := getIsObs(isTemporary, querysResult, headers)
headers = copyHeaders(headers)
pathStyle := false
if receivedHost, ok := headers[HEADER_HOST]; ok && len(receivedHost) > 0 && !strings.HasPrefix(receivedHost[0], bucketName+".") {
pathStyle = true
}
conf := &config{securityProvider: &securityProvider{ak: ak, sk: sk},
urlHolder: &urlHolder{scheme: "https", host: "dummy", port: 443},
pathStyle: pathStyle}

if isTemporary {
return getTemporaryAuthorization(ak, sk, method, bucketName, objectKey, signature, conf, params, headers, isObs)
}
signature, region, signedHeaders := parseHeaders(headers)
if signature == "v4" {
conf.signature = SignatureV4
requestURL, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false)
parsedRequestURL, _err := url.Parse(requestURL)
if _err != nil {
doLog(LEVEL_WARN, "Failed to parse requestURL")
return nil
}
headerKeys := strings.Split(signedHeaders, ";")
_headers := make(map[string][]string, len(headerKeys))
for _, headerKey := range headerKeys {
_headers[headerKey] = headers[headerKey]
}
ret = v4Auth(ak, sk, region, method, canonicalizedURL, parsedRequestURL.RawQuery, _headers)
ret[HEADER_AUTH_CAMEL] = fmt.Sprintf("%s Credential=%s,SignedHeaders=%s,Signature=%s", V4_HASH_PREFIX, ret["Credential"], ret["SignedHeaders"], ret["Signature"])
} else if signature == "v2" {
if isObs {
conf.signature = SignatureObs
} else {
conf.signature = SignatureV2
}
_, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false)
ret = v2Auth(ak, sk, method, canonicalizedURL, headers, isObs)
v2HashPrefix := V2_HASH_PREFIX
if isObs {
v2HashPrefix = OBS_HASH_PREFIX
}
ret[HEADER_AUTH_CAMEL] = fmt.Sprintf("%s %s:%s", v2HashPrefix, ak, ret["Signature"])
}
return

}

func getTemporaryAuthorization(ak, sk, method, bucketName, objectKey, signature string, conf *config, params map[string]string,
headers map[string][]string, isObs bool) (ret map[string]string) {

if signature == "v4" {
conf.signature = SignatureV4

longDate, ok := params[PARAM_DATE_AMZ_CAMEL]
if !ok {
longDate = params[HEADER_DATE_AMZ]
}
shortDate := longDate[:8]

credential, ok := params[PARAM_CREDENTIAL_AMZ_CAMEL]
if !ok {
credential = params[strings.ToLower(PARAM_CREDENTIAL_AMZ_CAMEL)]
}

_credential := UrlDecodeWithoutError(credential)

regions := regionRegex.FindStringSubmatch(_credential)
var region string
if len(regions) >= 2 {
region = regions[1]
}

_, scope := getCredential(ak, region, shortDate)

expires, ok := params[PARAM_EXPIRES_AMZ_CAMEL]
if !ok {
expires = params[strings.ToLower(PARAM_EXPIRES_AMZ_CAMEL)]
}

signedHeaders, ok := params[PARAM_SIGNEDHEADERS_AMZ_CAMEL]
if !ok {
signedHeaders = params[strings.ToLower(PARAM_SIGNEDHEADERS_AMZ_CAMEL)]
}

algorithm, ok := params[PARAM_ALGORITHM_AMZ_CAMEL]
if !ok {
algorithm = params[strings.ToLower(PARAM_ALGORITHM_AMZ_CAMEL)]
}

if _, ok := params[PARAM_SIGNATURE_AMZ_CAMEL]; ok {
delete(params, PARAM_SIGNATURE_AMZ_CAMEL)
} else if _, ok := params[strings.ToLower(PARAM_SIGNATURE_AMZ_CAMEL)]; ok {
delete(params, strings.ToLower(PARAM_SIGNATURE_AMZ_CAMEL))
}

ret = make(map[string]string, 6)
ret[PARAM_ALGORITHM_AMZ_CAMEL] = algorithm
ret[PARAM_CREDENTIAL_AMZ_CAMEL] = credential
ret[PARAM_DATE_AMZ_CAMEL] = longDate
ret[PARAM_EXPIRES_AMZ_CAMEL] = expires
ret[PARAM_SIGNEDHEADERS_AMZ_CAMEL] = signedHeaders

requestURL, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false)
parsedRequestURL, _err := url.Parse(requestURL)
if _err != nil {
doLog(LEVEL_WARN, "Failed to parse requestUrl")
return nil
}
stringToSign := getV4StringToSign(method, canonicalizedURL, parsedRequestURL.RawQuery, scope, longDate, UNSIGNED_PAYLOAD, strings.Split(signedHeaders, ";"), headers)
ret[PARAM_SIGNATURE_AMZ_CAMEL] = UrlEncode(getSignature(stringToSign, sk, region, shortDate), false)
} else if signature == "v2" {
if isObs {
conf.signature = SignatureObs
} else {
conf.signature = SignatureV2
}
_, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false)
expires, ok := params["Expires"]
if !ok {
expires = params["expires"]
}
headers[HEADER_DATE_CAMEL] = []string{expires}
stringToSign := getV2StringToSign(method, canonicalizedURL, headers, isObs)
ret = make(map[string]string, 3)
ret["Signature"] = UrlEncode(Base64Encode(HmacSha1([]byte(sk), []byte(stringToSign))), false)
ret["AWSAccessKeyId"] = UrlEncode(ak, false)
ret["Expires"] = UrlEncode(expires, false)
}

return
}
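GetAuthorization and GetV2Authorization are self-contained signers for callers that build raw HTTP requests: they reconstruct the canonicalized URL from bucket, key and query string, then return a ready-to-use header map. A hedged sketch (placeholder credentials and host):

	package main

	import (
		"fmt"
		"time"

		"code.gitea.io/gitea/modules/obs"
	)

	func main() {
		// Placeholder ak/sk and host. With no Authorization header and no
		// x-amz-* headers present, GetAuthorization falls through to
		// OBS-style v2 signing.
		headers := map[string][]string{
			"host": {"testopendata.obs.example.com"},
			"date": {obs.FormatUtcToRfc1123(time.Now())},
		}
		auth := obs.GetAuthorization("ak", "sk", "PUT", "testopendata",
			"attachment/demo.bin", "", headers)
		fmt.Println(auth["Authorization"]) // e.g. "OBS ak:<signature>"
	}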

+ 35
- 0
modules/setting/setting.go View File

@@ -447,6 +447,24 @@ var (
//blockchain config //blockchain config
BlockChainHost string BlockChainHost string
CommitValidDate string CommitValidDate string

//obs config
Endpoint string
AccessKeyID string
SecretAccessKey string
Bucket string
Location string
BasePath string
//RealPath string

//modelarts config
ModelArtsHost string
IamHost string
ProjectID string
ProjectName string
ModelArtsUsername string
ModelArtsPassword string
ModelArtsDomain string
) )


// DateLang transforms standard language locale name to corresponding value in datetime plugin. // DateLang transforms standard language locale name to corresponding value in datetime plugin.
@@ -1131,6 +1149,23 @@ func NewContext() {
sec = Cfg.Section("blockchain") sec = Cfg.Section("blockchain")
BlockChainHost = sec.Key("HOST").MustString("http://192.168.136.66:3302/") BlockChainHost = sec.Key("HOST").MustString("http://192.168.136.66:3302/")
CommitValidDate = sec.Key("COMMIT_VALID_DATE").MustString("2021-01-15") CommitValidDate = sec.Key("COMMIT_VALID_DATE").MustString("2021-01-15")

sec = Cfg.Section("obs")
Endpoint = sec.Key("ENDPOINT").MustString("112.95.163.82")
AccessKeyID = sec.Key("ACCESS_KEY_ID").MustString("")
SecretAccessKey = sec.Key("SECRET_ACCESS_KEY").MustString("")
Bucket = sec.Key("BUCKET").MustString("testopendata")
Location = sec.Key("LOCATION").MustString("cn-south-222")
BasePath = sec.Key("BASE_PATH").MustString("attachment/")

sec = Cfg.Section("modelarts")
ModelArtsHost = sec.Key("ENDPOINT").MustString("112.95.163.80")
IamHost = sec.Key("IAMHOST").MustString("112.95.163.80")
ProjectID = sec.Key("PROJECT_ID").MustString("")
ProjectName = sec.Key("PROJECT_NAME").MustString("")
ModelArtsUsername = sec.Key("USERNAME").MustString("")
ModelArtsPassword = sec.Key("PASSWORD").MustString("")
ModelArtsDomain = sec.Key("DOMAIN").MustString("cn-south-222")
} }


func loadInternalToken(sec *ini.Section) string { func loadInternalToken(sec *ini.Section) string {


+ 175
- 0
modules/storage/obs.go View File

@@ -0,0 +1,175 @@
// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package storage

import (
"io"
"path"
"strconv"
"strings"

"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/obs"
"code.gitea.io/gitea/modules/setting"
)

// check if the object exists
// TODO: change the query method
func ObsHasObject(path string) (bool, error) {
hasObject := false
output, err := ObsCli.ListObjects(&obs.ListObjectsInput{Bucket:setting.Bucket})
if err != nil {
log.Error("ListObjects failed:%v", err)
return hasObject, err
}

for _, obj := range output.Contents {
//obj.Key:attachment/0/1/019fd24e-4ef7-41cc-9f85-4a7b8504d958
if path == obj.Key {
hasObject = true
break
}
}

return hasObject, nil
}

func GetObsPartInfos(uuid string, uploadID string) (string, error) {
key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/")

output, err := ObsCli.ListParts(&obs.ListPartsInput{
Bucket: setting.Bucket,
Key: key,
UploadId: uploadID,
})
if err != nil {
log.Error("ListParts failed:", err.Error())
return "", err
}

var chunks string
for _, partInfo := range output.Parts {
chunks += strconv.Itoa(partInfo.PartNumber) + "-" + partInfo.ETag + ","
}

return chunks, nil
}

func NewObsMultiPartUpload(uuid string) (string, error) {
input := &obs.InitiateMultipartUploadInput{}
input.Bucket = setting.Bucket
input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/")

output, err := ObsCli.InitiateMultipartUpload(input)
if err != nil {
log.Error("InitiateMultipartUpload failed:", err.Error())
return "", err
}

return output.UploadId, nil
}

func CompleteObsMultiPartUpload(uuid string, uploadID string) error {
input := &obs.CompleteMultipartUploadInput{}
input.Bucket = setting.Bucket
input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/")
input.UploadId = uploadID
output, err := ObsCli.ListParts(&obs.ListPartsInput{
Bucket: setting.Bucket,
Key: input.Key,
UploadId: uploadID,
})
if err != nil {
log.Error("ListParts failed:", err.Error())
return err
}

for _, partInfo := range output.Parts {
input.Parts = append(input.Parts, obs.Part{
PartNumber: partInfo.PartNumber,
ETag: partInfo.ETag,
})
}

_, err = ObsCli.CompleteMultipartUpload(input)
if err != nil {
log.Error("CompleteMultipartUpload failed:", err.Error())
return err
}

return nil
}

func ObsUploadPart(uuid string, uploadId string, partNumber int, partSize int64, body io.Reader) (string, error) {
input := &obs.UploadPartInput{}
input.PartNumber = partNumber
input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/")
input.UploadId = uploadId
input.Bucket = setting.Bucket
input.PartSize = partSize
input.Body = body
output, err := ObsCli.UploadPart(input)
if err != nil {
log.Error("UploadPart failed:", err.Error())
return "", err
}

return output.ETag, nil
}

func ObsGenMultiPartSignedUrl(uuid string, uploadId string, partNumber int, partSize int64) (string, error) {
/*
input := &obs.CreateSignedUrlInput{}
input.Bucket = setting.Bucket
input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/")
input.Expires = int(PresignedUploadPartUrlExpireTime)
input.Method = obs.HTTP_PUT

input.QueryParams = map[string]string{
"Bucket": input.Bucket,
"Key": input.Key,
"PartNumber": com.ToStr(partNumber,10),
"UploadId": uploadId,
"PartSize": com.ToStr(partSize,10),
}

input.Headers = map[string]string{

}

*/

Key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/")
req, err := ObsCli.CreateUploadPartSignedUrl(setting.Bucket, Key, uploadId, partNumber, partSize)
if err != nil {
log.Error("CreateSignedUrl failed:", err.Error())
return "", err
}

log.Info("%s", req.URL.String())
log.Info("headers: %v", req.Header)

return req.URL.String(), nil

}

func ObsGetPreSignedUrl(uuid, fileName string) (string, error) {
input := &obs.CreateSignedUrlInput{}
input.Method = obs.HttpMethodGet
input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/")
input.Bucket = setting.Bucket
input.Expires = 60 * 60
reqParams := make(map[string]string)
reqParams["response-content-disposition"] = "attachment; filename=\"" + fileName + "\""
input.QueryParams = reqParams
output, err := ObsCli.CreateSignedUrl(input)
if err != nil {
log.Error("CreateSignedUrl failed:", err.Error())
return "", err
}

return output.SignedUrl, nil
}
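Together these helpers give routers/repo/attachment.go a complete chunked-upload flow. A hedged sketch of the expected call sequence (the function name, uuid and part readers are illustrative; part numbers are 1-based):

	package repo

	import (
		"io"

		"code.gitea.io/gitea/modules/storage"
	)

	func uploadByChunks(uuid string, parts []io.Reader, partSize int64) error {
		uploadID, err := storage.NewObsMultiPartUpload(uuid)
		if err != nil {
			return err
		}
		for i, body := range parts {
			// Part numbers are 1-based; in practice the final part is
			// usually shorter than partSize.
			if _, err := storage.ObsUploadPart(uuid, uploadID, i+1, partSize, body); err != nil {
				return err
			}
		}
		// CompleteObsMultiPartUpload re-lists the uploaded parts itself, so
		// the caller does not need to track the ETags returned above.
		return storage.CompleteObsMultiPartUpload(uuid, uploadID)
	}

To resume an interrupted upload, GetObsPartInfos reports the finished parts as "partNumber-ETag," pairs, which the browser-side uploader (ObsUploader.vue) can use to skip chunks that are already stored.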

+ 9
- 0
modules/storage/storage.go View File

@@ -8,6 +8,8 @@ import (
"fmt" "fmt"
"io" "io"


"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/obs"
"code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/setting"
) )


@@ -40,6 +42,7 @@ func Copy(dstStorage ObjectStorage, dstPath string, srcStorage ObjectStorage, sr
var ( var (
// Attachments represents attachments storage // Attachments represents attachments storage
Attachments ObjectStorage Attachments ObjectStorage
ObsCli *obs.ObsClient
) )


// Init initializes the storage // Init initializes the storage
@@ -63,6 +66,12 @@ func Init() error {
return fmt.Errorf("Unsupported attachment store type: %s", setting.Attachment.StoreType) return fmt.Errorf("Unsupported attachment store type: %s", setting.Attachment.StoreType)
} }


ObsCli, err = obs.New(setting.AccessKeyID, setting.SecretAccessKey, setting.Endpoint)
if err != nil {
log.Error("obs.New failed:", err)
return err
}

if err != nil { if err != nil {
return err return err
} }


+ 6
- 1
options/locale/locale_zh-CN.ini View File

@@ -756,7 +756,11 @@ cloudbrain.commit_image=提交
balance=余额 balance=余额
balance.total_view=余额总览 balance.total_view=余额总览
balance.available=可用余额: balance.available=可用余额:
balance.disable=不可用余额:
cloudbrain1=云脑1
cloudbrain2=云脑2
cloudbrain_selection=云脑选择
cloudbrain_platform_selection=选择您准备使用的云脑平台:
confirm_choice=确定


template.items=模板选项 template.items=模板选项
template.git_content=Git数据(默认分支) template.git_content=Git数据(默认分支)
@@ -2439,6 +2443,7 @@ file_status=文件处理状态:
file_init_status=等待上传 file_init_status=等待上传
waitting_uploading=请等待文件传输完成 waitting_uploading=请等待文件传输完成
md5_computing=计算MD5 md5_computing=计算MD5
obs-connecting=obs连接中
loading_file=加载文件 loading_file=加载文件
uploading=正在上传 uploading=正在上传
upload_complete=上传完成 upload_complete=上传完成


+ 165
- 9
package-lock.json View File

@@ -1922,9 +1922,16 @@
"axios": { "axios": {
"version": "0.21.1", "version": "0.21.1",
"resolved": "https://registry.npmjs.org/axios/-/axios-0.21.1.tgz", "resolved": "https://registry.npmjs.org/axios/-/axios-0.21.1.tgz",
"integrity": "sha512-fjgm5MvRHLhx+osE2xoekY70AhARk3a6hkN+3Io1jc00jtquGvxYlKlsFUhmUET0V5te6CcZI7lcv2Ym61mjHA==",
"integrity": "sha512-dKQiRHxGD9PPRIUNIWvZhPTPpl1rf/OxTYKsqKUDjBwYylTvV7SjSHJb9ratfyzM6wCdLCOYLzs73qpg5c4iGA==",
"requires": { "requires": {
"follow-redirects": "1.5.10"
"follow-redirects": "^1.10.0"
},
"dependencies": {
"follow-redirects": {
"version": "1.13.2",
"resolved": "https://registry.npm.taobao.org/follow-redirects/download/follow-redirects-1.13.2.tgz?cache=0&sync_timestamp=1611606737937&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Ffollow-redirects%2Fdownload%2Ffollow-redirects-1.13.2.tgz",
"integrity": "sha1-3XPI7/wScoulz0JZ12DqX7g+MUc="
}
} }
}, },
"babel-loader": { "babel-loader": {
@@ -1947,6 +1954,28 @@
"object.assign": "^4.1.0" "object.assign": "^4.1.0"
} }
}, },
"babel-polyfill": {
"version": "6.26.0",
"resolved": "https://registry.npm.taobao.org/babel-polyfill/download/babel-polyfill-6.26.0.tgz",
"integrity": "sha1-N5k3q8Z9eJWXCtxiHyhM2WbPIVM=",
"requires": {
"babel-runtime": "^6.26.0",
"core-js": "^2.5.0",
"regenerator-runtime": "^0.10.5"
},
"dependencies": {
"core-js": {
"version": "2.6.12",
"resolved": "https://registry.npm.taobao.org/core-js/download/core-js-2.6.12.tgz?cache=0&sync_timestamp=1611040749668&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fcore-js%2Fdownload%2Fcore-js-2.6.12.tgz",
"integrity": "sha1-2TM9+nsGXjR8xWgiGdb2kIWcwuw="
},
"regenerator-runtime": {
"version": "0.10.5",
"resolved": "https://registry.npm.taobao.org/regenerator-runtime/download/regenerator-runtime-0.10.5.tgz",
"integrity": "sha1-M2w+/BIgrc7dosn6tntaeVWjNlg="
}
}
},
"babel-runtime": { "babel-runtime": {
"version": "6.26.0", "version": "6.26.0",
"resolved": "https://registry.npmjs.org/babel-runtime/-/babel-runtime-6.26.0.tgz", "resolved": "https://registry.npmjs.org/babel-runtime/-/babel-runtime-6.26.0.tgz",
@@ -2136,6 +2165,11 @@
"resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz", "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz",
"integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==" "integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg=="
}, },
"blueimp-md5": {
"version": "2.18.0",
"resolved": "https://registry.npm.taobao.org/blueimp-md5/download/blueimp-md5-2.18.0.tgz",
"integrity": "sha1-EVK+EzXwxrORHtnjbbVPPmrFKTU="
},
"bn.js": { "bn.js": {
"version": "5.1.1", "version": "5.1.1",
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-5.1.1.tgz", "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-5.1.1.tgz",
@@ -3409,6 +3443,11 @@
"assert-plus": "^1.0.0" "assert-plus": "^1.0.0"
} }
}, },
"date-format": {
"version": "3.0.0",
"resolved": "https://registry.npm.taobao.org/date-format/download/date-format-3.0.0.tgz",
"integrity": "sha1-64eANlx9KxURB4+0keZHl4DzrZU="
},
"dateformat": { "dateformat": {
"version": "2.2.0", "version": "2.2.0",
"resolved": "https://registry.npmjs.org/dateformat/-/dateformat-2.2.0.tgz", "resolved": "https://registry.npmjs.org/dateformat/-/dateformat-2.2.0.tgz",
@@ -4020,6 +4059,38 @@
"resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
"integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=" "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ="
}, },
"esdk-obs-browserjs": {
"version": "3.20.7",
"resolved": "https://registry.npm.taobao.org/esdk-obs-browserjs/download/esdk-obs-browserjs-3.20.7.tgz",
"integrity": "sha1-vhziRlKEhW3PgZPl0DyX68bJI0s=",
"requires": {
"axios": "^0.19.0",
"babel-polyfill": "^6.26.0",
"blueimp-md5": "^2.10.0",
"js-base64": "^2.3.2",
"jssha": "^2.3.1",
"urijs": "^1.19.1"
},
"dependencies": {
"axios": {
"version": "0.19.2",
"resolved": "https://registry.npm.taobao.org/axios/download/axios-0.19.2.tgz?cache=0&sync_timestamp=1608609215811&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Faxios%2Fdownload%2Faxios-0.19.2.tgz",
"integrity": "sha1-PqNsXYgY0NX4qKl6bTa4bNwAyyc=",
"requires": {
"follow-redirects": "1.5.10"
}
}
}
},
"esdk-obs-nodejs": {
"version": "3.20.11",
"resolved": "https://registry.npm.taobao.org/esdk-obs-nodejs/download/esdk-obs-nodejs-3.20.11.tgz?cache=0&sync_timestamp=1610351636380&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fesdk-obs-nodejs%2Fdownload%2Fesdk-obs-nodejs-3.20.11.tgz",
"integrity": "sha1-/bMuzu3qoT+xLgmCcgg8yM6MIsE=",
"requires": {
"log4js": "^6.3.0",
"xml2js": "^0.4.23"
}
},
"eslint": { "eslint": {
"version": "6.8.0", "version": "6.8.0",
"resolved": "https://registry.npm.taobao.org/eslint/download/eslint-6.8.0.tgz", "resolved": "https://registry.npm.taobao.org/eslint/download/eslint-6.8.0.tgz",
@@ -5195,8 +5266,7 @@
"flatted": { "flatted": {
"version": "2.0.2", "version": "2.0.2",
"resolved": "https://registry.npmjs.org/flatted/-/flatted-2.0.2.tgz", "resolved": "https://registry.npmjs.org/flatted/-/flatted-2.0.2.tgz",
"integrity": "sha512-r5wGx7YeOwNWNlCA0wQ86zKyDLMQr+/RB8xy74M4hTphfmjlijTSSXGuH8rnvKZnfT9i+75zmd8jcKdMR4O6jA==",
"dev": true
"integrity": "sha512-r5wGx7YeOwNWNlCA0wQ86zKyDLMQr+/RB8xy74M4hTphfmjlijTSSXGuH8rnvKZnfT9i+75zmd8jcKdMR4O6jA=="
}, },
"flatten": { "flatten": {
"version": "1.0.3", "version": "1.0.3",
@@ -5214,23 +5284,23 @@
},
"follow-redirects": {
"version": "1.5.10",
"resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.5.10.tgz",
"integrity": "sha512-0V5l4Cizzvqt5D44aTXbFZz+FtyXV1vrDN6qrelxtfYQKW0KO0W2T/hkE8xvGa/540LkZlkaUjO4ailYTFtHVQ==",
"resolved": "https://registry.npm.taobao.org/follow-redirects/download/follow-redirects-1.5.10.tgz?cache=0&sync_timestamp=1611606737937&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Ffollow-redirects%2Fdownload%2Ffollow-redirects-1.5.10.tgz",
"integrity": "sha1-e3qfmuov3/NnhqlP9kPtB/T/Xio=",
"requires": {
"debug": "=3.1.0"
},
"dependencies": {
"debug": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz",
"integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==",
"resolved": "https://registry.npm.taobao.org/debug/download/debug-3.1.0.tgz?cache=0&sync_timestamp=1607566533140&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fdebug%2Fdownload%2Fdebug-3.1.0.tgz",
"integrity": "sha1-W7WgZyYotkFJVmuhaBnmFRjGcmE=",
"requires": {
"ms": "2.0.0"
}
},
"ms": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
"resolved": "https://registry.npm.taobao.org/ms/download/ms-2.0.0.tgz?cache=0&sync_timestamp=1607433842694&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fms%2Fdownload%2Fms-2.0.0.tgz",
"integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g="
}
}
@@ -5367,6 +5437,21 @@
"readable-stream": "^2.0.0"
}
},
"fs": {
"version": "0.0.1-security",
"resolved": "https://registry.npm.taobao.org/fs/download/fs-0.0.1-security.tgz",
"integrity": "sha1-invTcYa23d84E/I4WLV+yq9eQdQ="
},
"fs-extra": {
"version": "8.1.0",
"resolved": "https://registry.npm.taobao.org/fs-extra/download/fs-extra-8.1.0.tgz?cache=0&sync_timestamp=1611075469998&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Ffs-extra%2Fdownload%2Ffs-extra-8.1.0.tgz",
"integrity": "sha1-SdQ8RaiM2Wd2aMt74bRu/bjS4cA=",
"requires": {
"graceful-fs": "^4.2.0",
"jsonfile": "^4.0.0",
"universalify": "^0.1.0"
}
},
"fs-minipass": { "fs-minipass": {
"version": "2.1.0", "version": "2.1.0",
"resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz",
@@ -7700,6 +7785,14 @@
"minimist": "^1.2.5" "minimist": "^1.2.5"
} }
}, },
"jsonfile": {
"version": "4.0.0",
"resolved": "https://registry.npm.taobao.org/jsonfile/download/jsonfile-4.0.0.tgz?cache=0&sync_timestamp=1604161797011&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fjsonfile%2Fdownload%2Fjsonfile-4.0.0.tgz",
"integrity": "sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss=",
"requires": {
"graceful-fs": "^4.1.6"
}
},
"jsprim": { "jsprim": {
"version": "1.4.1", "version": "1.4.1",
"resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz", "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz",
@@ -7712,6 +7805,11 @@
"verror": "1.10.0" "verror": "1.10.0"
} }
}, },
"jssha": {
"version": "2.4.2",
"resolved": "https://registry.npm.taobao.org/jssha/download/jssha-2.4.2.tgz",
"integrity": "sha1-2VCwlWNJKL1rK9odQtqaOnYtZek="
},
"just-debounce": { "just-debounce": {
"version": "1.0.0", "version": "1.0.0",
"resolved": "https://registry.npmjs.org/just-debounce/-/just-debounce-1.0.0.tgz", "resolved": "https://registry.npmjs.org/just-debounce/-/just-debounce-1.0.0.tgz",
@@ -8184,6 +8282,18 @@
"chalk": "^2.4.2" "chalk": "^2.4.2"
} }
}, },
"log4js": {
"version": "6.3.0",
"resolved": "https://registry.npm.taobao.org/log4js/download/log4js-6.3.0.tgz",
"integrity": "sha1-EN+vu0NDUaPjAnegC5h5RG9xW8s=",
"requires": {
"date-format": "^3.0.0",
"debug": "^4.1.1",
"flatted": "^2.0.1",
"rfdc": "^1.1.4",
"streamroller": "^2.2.4"
}
},
"longest": { "longest": {
"version": "1.0.1", "version": "1.0.1",
"resolved": "https://registry.npmjs.org/longest/-/longest-1.0.1.tgz", "resolved": "https://registry.npmjs.org/longest/-/longest-1.0.1.tgz",
@@ -11821,6 +11931,11 @@
"resolved": "https://registry.npmjs.org/rework-visit/-/rework-visit-1.0.0.tgz", "resolved": "https://registry.npmjs.org/rework-visit/-/rework-visit-1.0.0.tgz",
"integrity": "sha1-mUWygD8hni96ygCtuLyfZA+ELJo=" "integrity": "sha1-mUWygD8hni96ygCtuLyfZA+ELJo="
}, },
"rfdc": {
"version": "1.2.0",
"resolved": "https://registry.npm.taobao.org/rfdc/download/rfdc-1.2.0.tgz?cache=0&sync_timestamp=1610744108114&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Frfdc%2Fdownload%2Frfdc-1.2.0.tgz",
"integrity": "sha1-npiUJY9I8oS0PDFDxoBwpPNzuUk="
},
"rgb-regex": { "rgb-regex": {
"version": "1.0.1", "version": "1.0.1",
"resolved": "https://registry.npmjs.org/rgb-regex/-/rgb-regex-1.0.1.tgz", "resolved": "https://registry.npmjs.org/rgb-regex/-/rgb-regex-1.0.1.tgz",
@@ -12428,6 +12543,23 @@
"resolved": "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.1.tgz", "resolved": "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.1.tgz",
"integrity": "sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ==" "integrity": "sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ=="
}, },
"streamroller": {
"version": "2.2.4",
"resolved": "https://registry.npm.taobao.org/streamroller/download/streamroller-2.2.4.tgz",
"integrity": "sha1-wZjO1C25QIamGTYIGHzoCl8rDlM=",
"requires": {
"date-format": "^2.1.0",
"debug": "^4.1.1",
"fs-extra": "^8.1.0"
},
"dependencies": {
"date-format": {
"version": "2.1.0",
"resolved": "https://registry.npm.taobao.org/date-format/download/date-format-2.1.0.tgz",
"integrity": "sha1-MdW16iEc9f12TNOLr50DPffhJc8="
}
}
},
"strict-uri-encode": { "strict-uri-encode": {
"version": "1.1.0", "version": "1.1.0",
"resolved": "https://registry.npmjs.org/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz", "resolved": "https://registry.npmjs.org/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz",
@@ -13983,6 +14115,11 @@
"os-name": "^3.1.0" "os-name": "^3.1.0"
} }
}, },
"universalify": {
"version": "0.1.2",
"resolved": "https://registry.npm.taobao.org/universalify/download/universalify-0.1.2.tgz?cache=0&sync_timestamp=1603179967633&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Funiversalify%2Fdownload%2Funiversalify-0.1.2.tgz",
"integrity": "sha1-tkb2m+OULavOzJ1mOcgNwQXvqmY="
},
"unquote": { "unquote": {
"version": "1.1.1", "version": "1.1.1",
"resolved": "https://registry.npmjs.org/unquote/-/unquote-1.1.1.tgz", "resolved": "https://registry.npmjs.org/unquote/-/unquote-1.1.1.tgz",
@@ -14053,6 +14190,11 @@
"punycode": "^2.1.0" "punycode": "^2.1.0"
} }
}, },
"urijs": {
"version": "1.19.5",
"resolved": "https://registry.npm.taobao.org/urijs/download/urijs-1.19.5.tgz",
"integrity": "sha1-EZaDq0svsL1jfl6m3ZEXvKxo0+Q="
},
"urix": { "urix": {
"version": "0.1.0", "version": "0.1.0",
"resolved": "https://registry.npmjs.org/urix/-/urix-0.1.0.tgz", "resolved": "https://registry.npmjs.org/urix/-/urix-0.1.0.tgz",
@@ -15035,6 +15177,20 @@
"repeat-string": "^1.5.2" "repeat-string": "^1.5.2"
} }
}, },
"xml2js": {
"version": "0.4.23",
"resolved": "https://registry.npm.taobao.org/xml2js/download/xml2js-0.4.23.tgz?cache=0&sync_timestamp=1599054229598&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fxml2js%2Fdownload%2Fxml2js-0.4.23.tgz",
"integrity": "sha1-oMaVFnUkIesqx1juTUzPWIQ+rGY=",
"requires": {
"sax": ">=0.6.0",
"xmlbuilder": "~11.0.0"
}
},
"xmlbuilder": {
"version": "11.0.1",
"resolved": "https://registry.npm.taobao.org/xmlbuilder/download/xmlbuilder-11.0.1.tgz",
"integrity": "sha1-vpuuHIoEbnazESdyY0fQrXACvrM="
},
"xtend": { "xtend": {
"version": "4.0.2", "version": "4.0.2",
"resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz",


+ 3
- 0
package.json View File

@@ -19,9 +19,12 @@
"cssnano": "4.1.10", "cssnano": "4.1.10",
"domino": "2.1.5", "domino": "2.1.5",
"dropzone": "5.7.2", "dropzone": "5.7.2",
"esdk-obs-browserjs": "3.20.7",
"esdk-obs-nodejs": "3.20.11",
"fast-glob": "3.2.2", "fast-glob": "3.2.2",
"file-loader": "6.0.0", "file-loader": "6.0.0",
"fomantic-ui": "2.8.4", "fomantic-ui": "2.8.4",
"fs": "0.0.1-security",
"highlight.js": "10.0.3", "highlight.js": "10.0.3",
"imports-loader": "0.8.0", "imports-loader": "0.8.0",
"jquery": "3.5.1", "jquery": "3.5.1",


+ 3
- 0
routers/api/v1/api.go View File

@@ -852,6 +852,9 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Group("/cloudbrain", func() { m.Group("/cloudbrain", func() {
m.Get("/:jobid", repo.GetCloudbrainTask) m.Get("/:jobid", repo.GetCloudbrainTask)
}, reqRepoReader(models.UnitTypeCloudBrain)) }, reqRepoReader(models.UnitTypeCloudBrain))
m.Group("/modelarts", func() {
m.Get("/:jobid", repo.GetModelArtsTask)
}, reqRepoReader(models.UnitTypeCloudBrain))
}, repoAssignment()) }, repoAssignment())
}) })
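Note that the new ModelArts API group is gated by the same reqRepoReader(models.UnitTypeCloudBrain) check as the CloudBrain group, so repository CloudBrain read permission also covers ModelArts job queries; no dedicated ModelArts unit type is introduced.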




+ 45
- 0
routers/api/v1/repo/modelarts.go View File

@@ -0,0 +1,45 @@
// Copyright 2016 The Gogs Authors. All rights reserved.
// Copyright 2018 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package repo

import (
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/modelarts"
"net/http"
)

func GetModelArtsTask(ctx *context.APIContext) {
var (
err error
)

jobID := ctx.Params(":jobid")
repoID := ctx.Repo.Repository.ID
job, err := models.GetRepoCloudBrainByJobID(repoID, jobID)
if err != nil {
ctx.NotFound(err)
return
}
result, err := modelarts.GetJob(jobID)
if err != nil {
ctx.NotFound(err)
return
}

job.Status = result.Status
err = models.UpdateJob(job)
if err != nil {
log.Error("UpdateJob failed:", err)
}

ctx.JSON(http.StatusOK, map[string]interface{}{
"JobID": jobID,
"JobStatus": result.Status,
})

}
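For reference, the JSON this handler writes can be polled by any HTTP client; the task list page added below does exactly this from the browser. A minimal Go sketch — the host, owner/repo, and job ID are placeholders, and authentication (session cookie or token) is assumed to be handled elsewhere:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// jobStatus mirrors the map written by GetModelArtsTask above.
type jobStatus struct {
	JobID     string `json:"JobID"`
	JobStatus string `json:"JobStatus"`
}

func main() {
	// Placeholder URL; the route is mounted at /api/v1/repos/:owner/:repo/modelarts/:jobid.
	resp, err := http.Get("https://example.com/api/v1/repos/owner/repo/modelarts/some-job-id")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var s jobStatus
	if err := json.NewDecoder(resp.Body).Decode(&s); err != nil {
		panic(err)
	}
	fmt.Printf("job %s: %s\n", s.JobID, s.JobStatus)
}

This mirrors the $.get(`/api/v1/repos/${repoPath}/modelarts/${jobID}`, ...) call in templates/repo/modelarts/index.tmpl further down.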

+ 221
- 43
routers/repo/attachment.go View File

@@ -5,6 +5,16 @@
package repo


import (
contexExt "context"
"encoding/json"
"errors"
"fmt"
"mime/multipart"
"net/http"
"path"
"strconv"
"strings"

"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/log"
@@ -13,12 +23,6 @@ import (
"code.gitea.io/gitea/modules/storage" "code.gitea.io/gitea/modules/storage"
"code.gitea.io/gitea/modules/upload" "code.gitea.io/gitea/modules/upload"
"code.gitea.io/gitea/modules/worker" "code.gitea.io/gitea/modules/worker"
contexExt "context"
"encoding/json"
"fmt"
"net/http"
"strconv"
"strings"


gouuid "github.com/satori/go.uuid" gouuid "github.com/satori/go.uuid"
) )
@@ -37,6 +41,15 @@ type CloudBrainDataset struct {
CreateTime string `json:"created_at"`
}


type UploadForm struct {
UploadID string `form:"uploadId"`
UuID string `form:"uuid"`
PartSize int64 `form:"size"`
Offset int64 `form:"offset"`
PartNumber int `form:"chunkNumber"`
PartFile multipart.File `form:"file"`
}

func RenderAttachmentSettings(ctx *context.Context) {
renderAttachmentSettings(ctx)
}
@@ -130,6 +143,13 @@ func DeleteAttachment(ctx *context.Context) {


// GetAttachment serves attachments
func GetAttachment(ctx *context.Context) {
typeCloudBrain := ctx.QueryInt("type")
err := checkTypeCloudBrain(typeCloudBrain)
if err != nil {
ctx.ServerError("checkTypeCloudBrain failed", err)
return
}

attach, err := models.GetAttachmentByUUID(ctx.Params(":uuid"))
if err != nil {
if models.IsErrAttachmentNotExist(err) {
@@ -183,19 +203,29 @@ func GetAttachment(ctx *context.Context) {


//If we have matched and access to release or issue
if setting.Attachment.StoreType == storage.MinioStorageType {
url, err := storage.Attachments.PresignedGetURL(attach.RelativePath(), attach.Name)
if err != nil {
ctx.ServerError("PresignedGetURL", err)
return
url := ""
if typeCloudBrain == models.TypeCloudBrainOne {
url, err = storage.Attachments.PresignedGetURL(attach.RelativePath(), attach.Name)
if err != nil {
ctx.ServerError("PresignedGetURL", err)
return
}
} else {
url, err = storage.ObsGetPreSignedUrl(attach.UUID, attach.Name)
if err != nil {
ctx.ServerError("ObsGetPreSignedUrl", err)
return
}
}


log.Info(url)

if err = increaseDownloadCount(attach, dataSet); err != nil {
ctx.ServerError("Update", err)
return
}


http.Redirect(ctx.Resp, ctx.Req.Request, url, http.StatusMovedPermanently)

} else {
fr, err := storage.Attachments.Open(attach.RelativePath())
if err != nil {
@@ -263,13 +293,29 @@ func GetPresignedPutObjectURL(ctx *context.Context) {


// AddAttachment response for add attachment record
func AddAttachment(ctx *context.Context) {
uuid := ctx.Query("uuid")
has, err := storage.Attachments.HasObject(models.AttachmentRelativePath(uuid))
typeCloudBrain := ctx.QueryInt("type")
err := checkTypeCloudBrain(typeCloudBrain)
if err != nil {
ctx.ServerError("HasObject", err)
ctx.ServerError("checkTypeCloudBrain failed", err)
return
}


uuid := ctx.Query("uuid")
has := false
if typeCloudBrain == models.TypeCloudBrainOne {
has, err = storage.Attachments.HasObject(models.AttachmentRelativePath(uuid))
if err != nil {
ctx.ServerError("HasObject", err)
return
}
} else {
has, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(uuid) + "/" + uuid)
if err != nil {
ctx.ServerError("ObsHasObject", err)
return
}
}

if !has {
ctx.Error(404, "attachment has not been uploaded")
return
@@ -282,6 +328,7 @@ func AddAttachment(ctx *context.Context) {
Name: ctx.Query("file_name"),
Size: ctx.QueryInt64("size"),
DatasetID: ctx.QueryInt64("dataset_id"),
Type: typeCloudBrain,
})


if err != nil {
@@ -291,16 +338,19 @@ func AddAttachment(ctx *context.Context) {


if attachment.DatasetID != 0 {
if strings.HasSuffix(attachment.Name, ".zip") {
err = worker.SendDecompressTask(contexExt.Background(), uuid)
if err != nil {
log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
} else {
attachment.DecompressState = models.DecompressStateIng
err = models.UpdateAttachment(attachment)
if typeCloudBrain == models.TypeCloudBrainOne {
err = worker.SendDecompressTask(contexExt.Background(), uuid)
if err != nil {
log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error())
log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
} else {
attachment.DecompressState = models.DecompressStateIng
err = models.UpdateAttachment(attachment)
if err != nil {
log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error())
}
}
}
//todo:decompress type_two
}
} }


@@ -340,9 +390,16 @@ func UpdateAttachmentDecompressState(ctx *context.Context) {


func GetSuccessChunks(ctx *context.Context) {
fileMD5 := ctx.Query("md5")
typeCloudBrain := ctx.QueryInt("type")
var chunks string


fileChunk, err := models.GetFileChunkByMD5AndUser(fileMD5, ctx.User.ID)
err := checkTypeCloudBrain(typeCloudBrain)
if err != nil {
ctx.ServerError("checkTypeCloudBrain failed", err)
return
}

fileChunk, err := models.GetFileChunkByMD5AndUser(fileMD5, ctx.User.ID, typeCloudBrain)
if err != nil {
if models.IsErrFileChunkNotExist(err) {
ctx.JSON(200, map[string]string{
@@ -357,12 +414,22 @@
return
}


isExist, err := storage.Attachments.HasObject(models.AttachmentRelativePath(fileChunk.UUID))
if err != nil {
ctx.ServerError("HasObject failed", err)
return
isExist := false
if typeCloudBrain == models.TypeCloudBrainOne {
isExist, err = storage.Attachments.HasObject(models.AttachmentRelativePath(fileChunk.UUID))
if err != nil {
ctx.ServerError("HasObject failed", err)
return
}
} else {
isExist, err = storage.ObsHasObject(models.AttachmentRelativePath(fileChunk.UUID))
if err != nil {
ctx.ServerError("ObsHasObject failed", err)
return
}
}



if isExist {
if fileChunk.IsUploaded == models.FileNotUploaded {
log.Info("the file has been uploaded but not recorded")
@@ -380,10 +447,18 @@
}
}


chunks, err = storage.GetPartInfos(fileChunk.UUID, fileChunk.UploadID)
if err != nil {
ctx.ServerError("GetPartInfos failed", err)
return
if typeCloudBrain == models.TypeCloudBrainOne {
chunks, err = storage.GetPartInfos(fileChunk.UUID, fileChunk.UploadID)
if err != nil {
ctx.ServerError("GetPartInfos failed", err)
return
}
} else {
chunks, err = storage.GetObsPartInfos(fileChunk.UUID, fileChunk.UploadID)
if err != nil {
ctx.ServerError("GetObsPartInfos failed", err)
return
}
}
}


@@ -445,6 +520,13 @@ func NewMultipart(ctx *context.Context) {
return
}


typeCloudBrain := ctx.QueryInt("type")
err = checkTypeCloudBrain(typeCloudBrain)
if err != nil {
ctx.ServerError("checkTypeCloudBrain failed", err)
return
}

if setting.Attachment.StoreType == storage.MinioStorageType {
totalChunkCounts := ctx.QueryInt("totalChunkCounts")
if totalChunkCounts > minio_ext.MaxPartsCount {
@@ -459,10 +541,19 @@ func NewMultipart(ctx *context.Context) {
}


uuid := gouuid.NewV4().String()
uploadID, err := storage.NewMultiPartUpload(uuid)
if err != nil {
ctx.ServerError("NewMultipart", err)
return
var uploadID string
if typeCloudBrain == models.TypeCloudBrainOne {
uploadID, err = storage.NewMultiPartUpload(uuid)
if err != nil {
ctx.ServerError("NewMultipart", err)
return
}
} else {
uploadID, err = storage.NewObsMultiPartUpload(uuid)
if err != nil {
ctx.ServerError("NewObsMultiPartUpload", err)
return
}
}


_, err = models.InsertFileChunk(&models.FileChunk{
@@ -472,6 +563,7 @@
Md5: ctx.Query("md5"),
Size: fileSize,
TotalChunks: totalChunkCounts,
Type: typeCloudBrain,
})


if err != nil {
@@ -495,25 +587,94 @@ func GetMultipartUploadUrl(ctx *context.Context) {
partNumber := ctx.QueryInt("chunkNumber") partNumber := ctx.QueryInt("chunkNumber")
size := ctx.QueryInt64("size") size := ctx.QueryInt64("size")


if size > minio_ext.MinPartSize {
ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size))
typeCloudBrain := ctx.QueryInt("type")
err := checkTypeCloudBrain(typeCloudBrain)
if err != nil {
ctx.ServerError("checkTypeCloudBrain failed", err)
return return
} }


url, err := storage.GenMultiPartSignedUrl(uuid, uploadID, partNumber, size)
url := ""
if typeCloudBrain == models.TypeCloudBrainOne {
if size > minio_ext.MinPartSize {
ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size))
return
}

url, err = storage.GenMultiPartSignedUrl(uuid, uploadID, partNumber, size)
if err != nil {
ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err))
return
}
} else {
url, err = storage.ObsGenMultiPartSignedUrl(uuid, uploadID, partNumber, size)
if err != nil {
ctx.Error(500, fmt.Sprintf("ObsGenMultiPartSignedUrl failed: %v", err))
return
}
}

ctx.JSON(200, map[string]string{
"url": url,
})
}

func GetObsKey(ctx *context.Context) {
uuid := gouuid.NewV4().String()
key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/")

ctx.JSON(200, map[string]string{
"uuid": uuid,
"key": key,
"access_key_id": setting.AccessKeyID,
"secret_access_key": setting.SecretAccessKey,
"server": setting.Endpoint,
"bucket": setting.Bucket,
})
}

func UploadPart(ctx *context.Context) {
tmp, err := ctx.Req.Body().String()
log.Info(tmp)

err = ctx.Req.ParseMultipartForm(100*1024*1024)
if err != nil {
ctx.Error(http.StatusBadRequest, fmt.Sprintf("ParseMultipartForm failed: %v", err))
return
}

file, fileHeader, err := ctx.Req.FormFile("file")
log.Info(ctx.Req.Form.Get("file"))
if err != nil {
ctx.Error(http.StatusBadRequest, fmt.Sprintf("FormFile failed: %v", err))
return
}



log.Info(fileHeader.Filename)

etag, err := storage.ObsUploadPart("", "", 1, 1, file)
if err != nil {
ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err))
ctx.Error(500, fmt.Sprintf("ObsUploadPart failed: %v", err))
return
}


ctx.JSON(200, map[string]string{
"url": url,
"etag": etag,
})
}


func CompleteMultipart(ctx *context.Context) {
uuid := ctx.Query("uuid")
uploadID := ctx.Query("uploadID")
typeCloudBrain := ctx.QueryInt("type")

err := checkTypeCloudBrain(typeCloudBrain)
if err != nil {
ctx.ServerError("checkTypeCloudBrain failed", err)
return
}


fileChunk, err := models.GetFileChunkByUUID(uuid)
if err != nil {
@@ -525,10 +686,18 @@ func CompleteMultipart(ctx *context.Context) {
return
}


_, err = storage.CompleteMultiPartUpload(uuid, uploadID)
if err != nil {
ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err))
return
if typeCloudBrain == models.TypeCloudBrainOne {
_, err = storage.CompleteMultiPartUpload(uuid, uploadID)
if err != nil {
ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err))
return
}
} else {
err = storage.CompleteObsMultiPartUpload(uuid, uploadID)
if err != nil {
ctx.Error(500, fmt.Sprintf("CompleteObsMultiPartUpload failed: %v", err))
return
}
}


fileChunk.IsUploaded = models.FileUploaded
@@ -546,6 +715,7 @@
Name: ctx.Query("file_name"),
Size: ctx.QueryInt64("size"),
DatasetID: ctx.QueryInt64("dataset_id"),
Type: typeCloudBrain,
})


if err != nil {
@@ -704,3 +874,11 @@ func queryDatasets(ctx *context.Context, attachs []*models.AttachmentUsername) {
})
return
}

func checkTypeCloudBrain(typeCloudBrain int) error {
if typeCloudBrain != models.TypeCloudBrainOne && typeCloudBrain != models.TypeCloudBrainTwo {
log.Error("type error:", typeCloudBrain)
return errors.New("type error")
}
return nil
}
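Taken together, these handlers define a chunked-upload protocol: ask which chunks already exist, open a multipart upload, fetch a presigned URL per part and PUT the bytes straight to object storage, then complete the upload and register the attachment. A rough client sketch in Go against the /attachments routes registered below — the host, MD5, uuid/uploadID values and single-chunk sizes are placeholders, session authentication is assumed, and error handling is trimmed:

package main

import (
	"bytes"
	"fmt"
	"net/http"
	"net/url"
)

const base = "https://example.com/attachments" // placeholder host

// putPart PUTs one chunk to the presigned URL returned by get_multipart_url
// and returns the ETag the object store assigns to that part.
func putPart(signedURL string, part []byte) (string, error) {
	req, err := http.NewRequest(http.MethodPut, signedURL, bytes.NewReader(part))
	if err != nil {
		return "", err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	return resp.Header.Get("ETag"), nil
}

func main() {
	md5 := "0123456789abcdef0123456789abcdef" // placeholder file hash
	// type=0 selects the MinIO path (TypeCloudBrainOne), type=1 the OBS path.
	steps := []string{
		base + "/get_chunks?md5=" + md5 + "&type=1",
		base + "/new_multipart?md5=" + md5 + "&file_name=data.zip&size=4096&totalChunkCounts=1&type=1",
		base + "/get_multipart_url?" + url.Values{
			"uuid":        {"UUID"},
			"uploadID":    {"UPLOAD_ID"},
			"chunkNumber": {"1"},
			"size":        {"4096"},
			"type":        {"1"},
		}.Encode(),
	}
	for _, s := range steps {
		if resp, err := http.Get(s); err == nil {
			resp.Body.Close() // responses carry uuid/uploadID/chunks or a signed URL
		}
	}
	// After PUTting each part via putPart, POST /attachments/complete_multipart
	// with uuid, uploadID, type, file_name, size and dataset_id to finish and
	// register the attachment row.
	fmt.Println("done (sketch only)")
}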

+ 1
- 1
routers/repo/cloudbrain.go View File

@@ -46,7 +46,7 @@ func CloudBrainIndex(ctx *context.Context) {
PageSize: setting.UI.IssuePagingNum,
},
RepoID: repo.ID,
// SortType: sortType,
Type: models.TypeCloudBrainOne,
})
if err != nil {
ctx.ServerError("Cloudbrain", err)


+ 2
- 1
routers/repo/dataset.go View File

@@ -49,7 +49,7 @@ func DatasetIndex(ctx *context.Context) {
ctx.NotFound("GetDatasetByRepo", err) ctx.NotFound("GetDatasetByRepo", err)
return return
} }
err = models.GetDatasetAttachments(dataset)
err = models.GetDatasetAttachments(ctx.QueryInt("type"), dataset)
if err != nil { if err != nil {
ctx.ServerError("GetDatasetAttachments", err) ctx.ServerError("GetDatasetAttachments", err)
return return
@@ -80,6 +80,7 @@ func DatasetIndex(ctx *context.Context) {
ctx.Data["Attachments"] = attachments ctx.Data["Attachments"] = attachments
ctx.Data["IsOwner"] = true ctx.Data["IsOwner"] = true
ctx.Data["StoreType"] = setting.Attachment.StoreType ctx.Data["StoreType"] = setting.Attachment.StoreType
ctx.Data["Type"] = ctx.QueryInt("type")


renderAttachmentSettings(ctx) renderAttachmentSettings(ctx)




+ 247
- 0
routers/repo/modelarts.go View File

@@ -0,0 +1,247 @@
package repo

import (
"code.gitea.io/gitea/modules/modelarts"
"errors"
"github.com/unknwon/com"
"strconv"
"strings"
"time"

"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/auth"
"code.gitea.io/gitea/modules/base"
"code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
)

const (
tplModelArtsIndex base.TplName = "repo/modelarts/index"
tplModelArtsNew base.TplName = "repo/modelarts/new"
tplModelArtsShow base.TplName = "repo/modelarts/show"
)

// MustEnableModelArts checks whether the repository has the CloudBrain unit enabled
func MustEnableModelArts(ctx *context.Context) {
if !ctx.Repo.CanRead(models.UnitTypeCloudBrain) {
ctx.NotFound("MustEnableCloudbrain", nil)
return
}
}
func ModelArtsIndex(ctx *context.Context) {
MustEnableModelArts(ctx)
repo := ctx.Repo.Repository
page := ctx.QueryInt("page")
if page <= 0 {
page = 1
}

ciTasks, count, err := models.Cloudbrains(&models.CloudbrainsOptions{
ListOptions: models.ListOptions{
Page: page,
PageSize: setting.UI.IssuePagingNum,
},
RepoID: repo.ID,
Type: models.TypeCloudBrainTwo,
})
if err != nil {
ctx.ServerError("Cloudbrain", err)
return
}

for i, task := range ciTasks {
if task.Status == string(models.JobRunning) {
ciTasks[i].CanDebug = true
} else {
ciTasks[i].CanDebug = false
}
}

pager := context.NewPagination(int(count), setting.UI.IssuePagingNum, page, 5)
pager.SetDefaultParams(ctx)
ctx.Data["Page"] = pager

ctx.Data["PageIsCloudBrain"] = true
ctx.Data["Tasks"] = ciTasks
ctx.HTML(200, tplModelArtsIndex)
}

func ModelArtsNew(ctx *context.Context) {
ctx.Data["PageIsCloudBrain"] = true

t := time.Now()
var jobName = cutString(ctx.User.Name, 5) + t.Format("2006010215") + strconv.Itoa(int(t.Unix()))[5:]
ctx.Data["job_name"] = jobName

attachs, err := models.GetModelArtsUserAttachments(ctx.User.ID)
if err != nil {
ctx.ServerError("GetAllUserAttachments failed:", err)
return
}

ctx.Data["attachments"] = attachs
ctx.Data["dataset_path"] = modelarts.DataSetMountPath
ctx.Data["env"] = modelarts.NotebookEnv
ctx.Data["notebook_type"] = modelarts.NotebookType
ctx.Data["flavor"] = modelarts.FlavorInfo
ctx.HTML(200, tplModelArtsNew)
}

func ModelArtsCreate(ctx *context.Context, form auth.CreateModelArtsForm) {
ctx.Data["PageIsCloudBrain"] = true
jobName := form.JobName
uuid := form.Attachment
description := form.Description
//repo := ctx.Repo.Repository

err := modelarts.GenerateTask(ctx, jobName, uuid, description)
if err != nil {
ctx.RenderWithErr(err.Error(), tplModelArtsNew, &form)
return
}

ctx.Redirect(setting.AppSubURL + ctx.Repo.RepoLink + "/modelarts")
}

func ModelArtsShow(ctx *context.Context) {
ctx.Data["PageIsCloudBrain"] = true

var jobID = ctx.Params(":jobid")
task, err := models.GetCloudbrainByJobID(jobID)
if err != nil {
ctx.Data["error"] = err.Error()
ctx.RenderWithErr(err.Error(), tplModelArtsIndex, nil)
return
}

result, err := modelarts.GetJob(jobID)
if err != nil {
ctx.Data["error"] = err.Error()
ctx.RenderWithErr(err.Error(), tplModelArtsIndex, nil)
return
}

if result != nil {
task.Status = result.Status
err = models.UpdateJob(task)
if err != nil {
ctx.Data["error"] = err.Error()
ctx.RenderWithErr(err.Error(), tplModelArtsIndex, nil)
return
}

createTime, _ := com.StrTo(result.CreationTimestamp).Int64()
result.CreateTime = time.Unix(int64(createTime/1000), 0).Format("2006-01-02 15:04:05")
endTime, _ := com.StrTo(result.LatestUpdateTimestamp).Int64()
result.LatestUpdateTime = time.Unix(int64(endTime/1000), 0).Format("2006-01-02 15:04:05")
result.QueuingInfo.BeginTime = time.Unix(int64(result.QueuingInfo.BeginTimestamp/1000), 0).Format("2006-01-02 15:04:05")
result.QueuingInfo.EndTime = time.Unix(int64(result.QueuingInfo.EndTimestamp/1000), 0).Format("2006-01-02 15:04:05")
}

ctx.Data["task"] = task
ctx.Data["jobID"] = jobID
ctx.Data["result"] = result
ctx.HTML(200, tplModelArtsShow)
}

func ModelArtsDebug(ctx *context.Context) {
var jobID = ctx.Params(":jobid")
_, err := models.GetCloudbrainByJobID(jobID)
if err != nil {
ctx.ServerError("GetCloudbrainByJobID failed", err)
return
}

result, err := modelarts.GetJob(jobID)
if err != nil {
ctx.RenderWithErr(err.Error(), tplModelArtsIndex, nil)
return
}

res, err := modelarts.GetJobToken(jobID)
if err != nil {
ctx.RenderWithErr(err.Error(), tplModelArtsIndex, nil)
return
}


urls := strings.Split(result.Spec.Annotations.Url, "/")
urlPrefix := result.Spec.Annotations.TargetDomain
for i, url := range urls {
if i > 2 {
urlPrefix += "/" + url
}
}

//urlPrefix := result.Spec.Annotations.TargetDomain + "/modelarts/internal/hub/notebook/user/" + task.JobID
log.Info(urlPrefix)
debugUrl := urlPrefix + "?token=" + res.Token
ctx.Redirect(debugUrl)
}

func ModelArtsStop(ctx *context.Context) {
var jobID = ctx.Params(":jobid")
log.Info(jobID)
task, err := models.GetCloudbrainByJobID(jobID)
if err != nil {
ctx.ServerError("GetCloudbrainByJobID failed", err)
return
}

if task.Status != string(models.JobRunning) {
log.Error("the job(%s) is not running", task.JobName)
ctx.ServerError("the job is not running", errors.New("the job is not running"))
return
}

param := models.NotebookAction{
Action: models.ActionStop,
}
res, err := modelarts.StopJob(jobID, param)
if err != nil {
log.Error("StopJob(%s) failed:%v", task.JobName, err.Error())
ctx.ServerError("StopJob failed", err)
return
}

task.Status = res.CurrentStatus
err = models.UpdateJob(task)
if err != nil {
ctx.ServerError("UpdateJob failed", err)
return
}

ctx.Redirect(setting.AppSubURL + ctx.Repo.RepoLink + "/modelarts")
}

func ModelArtsDel(ctx *context.Context) {
var jobID = ctx.Params(":jobid")
task, err := models.GetCloudbrainByJobID(jobID)
if err != nil {
ctx.ServerError("GetCloudbrainByJobID failed", err)
return
}

if task.Status != string(models.JobStopped) {
log.Error("the job(%s) has not been stopped", task.JobName)
ctx.ServerError("the job has not been stopped", errors.New("the job has not been stopped"))
return
}

_, err = modelarts.DelJob(jobID)
if err != nil {
log.Error("DelJob(%s) failed:%v", task.JobName, err.Error())
ctx.ServerError("DelJob failed", err)
return
}

err = models.DeleteJob(task)
if err != nil {
ctx.ServerError("DeleteJob failed", err)
return
}

ctx.Redirect(setting.AppSubURL + ctx.Repo.RepoLink + "/modelarts")
}
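The URL rewriting in ModelArtsDebug above relies on strings.Split: for an absolute URL, indices 0-2 of the split are the scheme ("https:"), the empty string after "//", and the original host, so appending only the parts with i > 2 grafts the notebook path onto TargetDomain. A standalone sketch with made-up values:

package main

import (
	"fmt"
	"strings"
)

// rebase keeps the path portion of rawURL (everything after scheme and host)
// and prefixes it with targetDomain, mirroring the loop in ModelArtsDebug.
func rebase(targetDomain, rawURL string) string {
	parts := strings.Split(rawURL, "/")
	out := targetDomain
	for i, p := range parts {
		if i > 2 { // skip "https:", "", and the original host
			out += "/" + p
		}
	}
	return out
}

func main() {
	// Hypothetical values for illustration only.
	fmt.Println(rebase("https://notebook.example.com",
		"https://internal.example.com/modelarts/hub/notebook/user/abc123"))
	// -> https://notebook.example.com/modelarts/hub/notebook/user/abc123
}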


+ 14
- 0
routers/routes/routes.go View File

@@ -529,6 +529,8 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Get("/get_multipart_url", repo.GetMultipartUploadUrl) m.Get("/get_multipart_url", repo.GetMultipartUploadUrl)
m.Post("/complete_multipart", repo.CompleteMultipart) m.Post("/complete_multipart", repo.CompleteMultipart)
m.Post("/update_chunk", repo.UpdateMultipart) m.Post("/update_chunk", repo.UpdateMultipart)
m.Post("/upload_part", repo.UploadPart)
m.Get("/get_obs_key", repo.GetObsKey)
}, reqSignIn) }, reqSignIn)


m.Group("/attachments", func() { m.Group("/attachments", func() {
@@ -913,6 +915,18 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Post("/create", reqRepoCloudBrainWriter, bindIgnErr(auth.CreateCloudBrainForm{}), repo.CloudBrainCreate) m.Post("/create", reqRepoCloudBrainWriter, bindIgnErr(auth.CreateCloudBrainForm{}), repo.CloudBrainCreate)
}, context.RepoRef()) }, context.RepoRef())


m.Group("/modelarts", func() {
m.Get("", reqRepoCloudBrainReader, repo.ModelArtsIndex)
m.Group("/:jobid", func() {
m.Get("", reqRepoCloudBrainReader, repo.ModelArtsShow)
m.Get("/debug", reqRepoCloudBrainReader, repo.ModelArtsDebug)
m.Post("/stop", reqRepoCloudBrainWriter, repo.ModelArtsStop)
m.Post("/del", reqRepoCloudBrainWriter, repo.ModelArtsDel)
})
m.Get("/create", reqRepoCloudBrainWriter, repo.ModelArtsNew)
m.Post("/create", reqRepoCloudBrainWriter, bindIgnErr(auth.CreateModelArtsForm{}), repo.ModelArtsCreate)
}, context.RepoRef())

m.Group("/blockchain", func() { m.Group("/blockchain", func() {
m.Get("", repo.BlockChainIndex) m.Get("", repo.BlockChainIndex)
}, context.RepoRef()) }, context.RepoRef())
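With this group in place, every repository gains /modelarts (task list), /modelarts/create (GET for the form, POST to submit), and per-job routes /modelarts/:jobid (details) and /modelarts/:jobid/debug (redirect into the notebook), plus the POST-only /modelarts/:jobid/stop and /modelarts/:jobid/del — a direct parallel to the existing /cloudbrain group.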


+ 1
- 0
templates/base/head.tmpl View File

@@ -175,6 +175,7 @@
<link rel="stylesheet" href="{{StaticUrlPrefix}}/css/theme-{{DefaultTheme}}.css?v={{MD5 AppVer}}"> <link rel="stylesheet" href="{{StaticUrlPrefix}}/css/theme-{{DefaultTheme}}.css?v={{MD5 AppVer}}">
{{end}} {{end}}
{{template "custom/header" .}} {{template "custom/header" .}}

</head> </head>
<body> <body>
{{template "custom/body_outer_pre" .}} {{template "custom/body_outer_pre" .}}


+ 0
- 2
templates/explore/dataset_list.tmpl View File

@@ -31,9 +31,7 @@
<div class="ui right metas"> <div class="ui right metas">
<span class="text grey">{{svg "octicon-tasklist" 16}} {{$.i18n.Tr (printf "dataset.task.%s" .Task)}}</span> <span class="text grey">{{svg "octicon-tasklist" 16}} {{$.i18n.Tr (printf "dataset.task.%s" .Task)}}</span>
<span class="text grey">{{svg "octicon-tag" 16}}{{$.i18n.Tr (printf "dataset.category.%s" .Category)}}</span> <span class="text grey">{{svg "octicon-tag" 16}}{{$.i18n.Tr (printf "dataset.category.%s" .Category)}}</span>
{{if ne .DownloadTimes 0}}
<span class="text grey">{{svg "octicon-flame" 16}} {{.DownloadTimes}}</span> <span class="text grey">{{svg "octicon-flame" 16}} {{.DownloadTimes}}</span>
{{end}}
</div> </div>
</div> </div>
<div class="description"> <div class="description">


+ 1
- 1
templates/repo/cloudbrain/index.tmpl View File

@@ -198,7 +198,7 @@
<!-- Title and the "new task" button -->
<div class="ui three column stack able grid">
<div class="column">
<h2>{{.i18n.Tr "repo.cloudbrain"}}</h2>
<h2>{{.i18n.Tr "repo.cloudbrain1"}}</h2>
</div>


<div class="column">


+ 14
- 2
templates/repo/datasets/dataset.tmpl View File

@@ -2,8 +2,20 @@
<div class="field required dataset-files"> <div class="field required dataset-files">
<label>{{.i18n.Tr "dataset.file"}}</label> <label>{{.i18n.Tr "dataset.file"}}</label>
<div class="files"></div> <div class="files"></div>
<div class="ui dropzone" id="dataset" data-upload-url="{{AppSubUrl}}/attachments" data-accepts="{{.AttachmentAllowedTypes}}" data-remove-url="{{AppSubUrl}}/attachments/delete" data-csrf="{{.CsrfToken}}" dataset-id={{.dataset.ID}} data-max-file="100" data-dataset-id="{{.dataset.ID}}" data-max-size="{{.AttachmentMaxSize}}" data-default-message="{{.i18n.Tr "dropzone.default_message"}}" data-invalid-input-type="{{.i18n.Tr "dropzone.invalid_input_type"}}" data-file-too-big="{{.i18n.Tr "dropzone.file_too_big"}}" data-remove-file="{{.i18n.Tr "dropzone.remove_file"}}">

<div class="ui dropzone"
id="dataset"
data-upload-url="{{AppSubUrl}}/attachments"
data-accepts="{{.AttachmentAllowedTypes}}"
data-remove-url="{{AppSubUrl}}/attachments/delete"
data-csrf="{{.CsrfToken}}"
dataset-id={{.dataset.ID}}
data-max-file="100"
data-dataset-id="{{.dataset.ID}}"
data-max-size="{{.AttachmentMaxSize}}"
data-default-message="{{.i18n.Tr "dropzone.default_message"}}"
data-invalid-input-type="{{.i18n.Tr "dropzone.invalid_input_type"}}"
data-file-too-big="{{.i18n.Tr "dropzone.file_too_big"}}"
data-remove-file="{{.i18n.Tr "dropzone.remove_file"}}">
</div>
</div>
</div>

+ 2
- 2
templates/repo/datasets/dataset_list.tmpl View File

@@ -3,7 +3,7 @@
<div class="ui grid item" id="{{.UUID}}"> <div class="ui grid item" id="{{.UUID}}">
<div class="row"> <div class="row">
<div class="{{if $.Permission.CanWrite $.UnitTypeDatasets}}five{{else}}nine{{end}} wide column"> <div class="{{if $.Permission.CanWrite $.UnitTypeDatasets}}five{{else}}nine{{end}} wide column">
<a class="title" href="{{.DownloadURL}}">
<a class="title" href="{{.DownloadURL}}?type={{$.Type}}">
<span class="fitted">{{svg "octicon-cloud-download" 16}}</span> {{.Name}} <span class="fitted">{{svg "octicon-cloud-download" 16}}</span> {{.Name}}
</a> </a>
</div> </div>
@@ -14,7 +14,7 @@
<span class="ui text center" data-tooltip='{{$.i18n.Tr "dataset.download_count"}}' data-position="bottom right">{{svg "octicon-flame" 16}} {{(.DownloadCount | PrettyNumber)}}</span> <span class="ui text center" data-tooltip='{{$.i18n.Tr "dataset.download_count"}}' data-position="bottom right">{{svg "octicon-flame" 16}} {{(.DownloadCount | PrettyNumber)}}</span>
</div> </div>


<div class="one wide column">
<div class="one wide column" style="{{if ne $.Type 0}}visibility: hidden;{{end}}">
<span class="ui text center clipboard" data-clipboard-text="{{.DownloadURL}}" data-tooltip='{{$.i18n.Tr "dataset.copy_url"}}' data-clipboard-action="copy">{{svg "octicon-file" 16}}</span> <span class="ui text center clipboard" data-clipboard-text="{{.DownloadURL}}" data-tooltip='{{$.i18n.Tr "dataset.copy_url"}}' data-clipboard-action="copy">{{svg "octicon-file" 16}}</span>
</div> </div>
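The copy-URL control is hidden for non-zero types, presumably because the bare {{.DownloadURL}} placed on the clipboard carries no type parameter: pasted back, it would be handled by GetAttachment as a type-0 (MinIO) request rather than going through the OBS presigned-URL path shown in routers/repo/attachment.go above.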




+ 14
- 7
templates/repo/datasets/index.tmpl View File

@@ -1,6 +1,17 @@
{{template "base/head" .}} {{template "base/head" .}}
<div class="repository release dataset-list view"> <div class="repository release dataset-list view">
{{template "repo/header" .}}
{{template "repo/header" .}}
<script>
$(document).ready(function() {
url = window.location.href
type = url.split('?type=')[1]
if (type == 0){
$('.contorl_component').attr("id", 'minioUploader')
}else{
$('.contorl_component').attr("id", 'obsUploader')
}
});
</script>
<form class="ui container" action="{{.Link}}" method="post"> <form class="ui container" action="{{.Link}}" method="post">
<input name="id" value="{{.dataset.ID}}" type="hidden" /> <input name="id" value="{{.dataset.ID}}" type="hidden" />
<!-- <!--
@@ -92,22 +103,18 @@
data-invalid-input-type="{{.i18n.Tr "dropzone.invalid_input_type"}}"
data-file-too-big="{{.i18n.Tr "dropzone.file_too_big"}}"
data-remove-file="{{.i18n.Tr "dropzone.remove_file"}}"

data-file-status='{{.i18n.Tr "dropzone.file_status"}}'
data-file-init-status='{{.i18n.Tr "dropzone.file_init_status"}}'
data-waitting-uploading='{{.i18n.Tr "dropzone.waitting_uploading"}}'
data-md5-computing='{{.i18n.Tr "dropzone.md5_computing"}}'
data-obs-connecting='{{.i18n.Tr "dropzone.obs-connecting"}}'
data-loading-file='{{.i18n.Tr "dropzone.loading_file"}}'
data-upload-complete='{{.i18n.Tr "dropzone.upload_complete"}}'
data-uploading='{{.i18n.Tr "dropzone.uploading"}}'
data-failed='{{.i18n.Tr "dropzone.failed"}}'
>
</div>
{{if eq .StoreType "minio"}}
<div id="minioUploader"></div>
{{else}}
<div style="margin: 2em 0;"> {{.i18n.Tr "dropzone.enable_minio_support"}} </div>
{{end}}
<div class="contorl_component"></div>
{{end}}
</div>
</div>


+ 88
- 4
templates/repo/header.tmpl View File

@@ -1,3 +1,8 @@
<style>
.ui.form .ui.button{
margin-bottom: 15px;
}
</style>
<div class="header-wrapper"> <div class="header-wrapper">
{{with .Repository}} {{with .Repository}}
<div class="ui container"> <div class="ui container">
@@ -98,8 +103,9 @@
{{end}}


{{if .Permission.CanRead $.UnitTypeDatasets}}
<a class="{{if .PageIsDataset}}active{{end}} item" href="{{.RepoLink}}/datasets">
<a class="{{if .PageIsDataset}}active{{end}} item dataset" >
{{svg "octicon-inbox" 16}} {{.i18n.Tr "datasets"}}
<span style="display:none" class="dataset_link">{{.RepoLink}}</span>
</a>
{{end}}


@@ -140,12 +146,13 @@
{{end}}


{{if .Permission.CanRead $.UnitTypeCloudBrain}}
<a class="{{if .PageIsCloudBrain}}active{{end}} item" href="{{.RepoLink}}/cloudbrain">
<a class="{{if .PageIsCloudBrain}}active{{end}} item cloudbrain">
{{svg "octicon-server" 16}} {{.i18n.Tr "repo.cloudbrain"}}
<span style="display:none" class="cloudbrain_link">{{.RepoLink}}</span>
</a>
{{end}}


<a class="{{if .PageIsBlockChain}}active{{end}} item " href="{{.RepoLink}}/blockchain">
<a class="{{if .PageIsBlockChain}}active{{end}} item ">
{{svg "octicon-law" 16}} {{svg "octicon-law" 16}}
{{.i18n.Tr "repo.balance"}} {{.i18n.Tr "repo.balance"}}
</a> </a>
@@ -163,4 +170,81 @@
{{end}} {{end}}
</div> </div>
<div class="ui tabs divider"></div> <div class="ui tabs divider"></div>
</div>
</div>

<div class="ui select_cloudbrain modal">
<div class="header">
{{$.i18n.Tr "repo.cloudbrain_selection"}}
</div>
<div class="content">
<div class="ui form" method="post">
<div class="grouped fields">
<label for="CloudBrain">{{$.i18n.Tr "repo.cloudbrain_platform_selection"}}</label>
<div class="field">
<div class="ui radio checkbox">
<input type="radio" name="CloudBrain" checked tabindex="0" class="hidden" value="0">
<label>{{$.i18n.Tr "repo.cloudbrain1"}}</label>
</div>
</div>
<div class="field">
<div class="ui radio checkbox">
<input type="radio" name="CloudBrain" tabindex="0" class="hidden" value="1">
<label>{{$.i18n.Tr "repo.cloudbrain2"}}</label>
</div>
</div>
</div>
<div class="actions">
<div class="ui positive right labeled icon button">
{{$.i18n.Tr "repo.confirm_choice"}}
<i class="checkmark icon"></i>
</div>
</div>
</div>
</div>


</div>
<script src="https://cdn.jsdelivr.net/npm/jquery@3.2.1/dist/jquery.min.js"></script>
<script>
// Clicking the CloudBrain tab opens the platform picker, then navigates to the chosen page
$('.item.cloudbrain').click(function(){
$('.ui.select_cloudbrain.modal')
.modal('closable', false)
.modal('show');
// $('.ui.select_cloudbrain.modal').modal('show');
$('.ui.radio.checkbox').checkbox();

var repolink = $(".cloudbrain_link").text()
$(".ui.positive.right.icon.button").click(function(){
// Read which platform radio button is selected
var checked_radio = $("input[type='radio']:checked").val()

if(checked_radio=='0'){
window.location.href = repolink+'/cloudbrain'
}else if(checked_radio=='1'){
window.location.href = repolink+'/modelarts'
}else{
return;
}
})
})

// Clicking the Datasets tab opens the same picker, then navigates to the matching dataset view
$('.item.dataset').click(function(){
$('.ui.select_cloudbrain.modal')
.modal('closable', false)
.modal('show');
$('.ui.radio.checkbox').checkbox();

var repolink = $(".dataset_link").text()
console.log(repolink)
$(".ui.positive.right.icon.button").click(function(){
// Read which platform radio button is selected
var checked_radio = $("input[type='radio']:checked").val()
$('.ui.select_cloudbrain.modal')
.modal('show');
// Pass the choice to the backend via the query string
window.location.href = repolink + "/datasets?type=" + checked_radio
})
})
</script>

+ 479
- 0
templates/repo/modelarts/index.tmpl View File

@@ -0,0 +1,479 @@
<!-- Header navigation -->
{{template "base/head" .}}

<style>
#deletemodel {
width: 100%;
height: 100%;
}
/* Modal */

#mask {
position: fixed;
top: 0px;
left: 0px;
right: 0px;
bottom: 0px;
filter: alpha(opacity=60);
background-color: #777;
z-index: 1000;
display: none;
opacity: 0.8;
-moz-opacity: 0.5;
padding-top: 100px;
color: #000000
}

#loadingPage {
margin: 200px auto;
width: 50px;
height: 40px;
text-align: center;
font-size: 10px;
display: block;
}

#loadingPage>div {
background-color: green;
height: 100%;
width: 6px;
display: inline-block;
-webkit-animation: sk-stretchdelay 1.2s infinite ease-in-out;
animation: sk-stretchdelay 1.2s infinite ease-in-out;
}

#loadingPage .rect2 {
-webkit-animation-delay: -1.1s;
animation-delay: -1.1s;
}

#loadingPage .rect3 {
-webkit-animation-delay: -1.0s;
animation-delay: -1.0s;
}

#loadingPage .rect4 {
-webkit-animation-delay: -0.9s;
animation-delay: -0.9s;
}

#loadingPage .rect5 {
-webkit-animation-delay: -0.8s;
animation-delay: -0.8s;
}

@-webkit-keyframes sk-stretchdelay {
0%,
40%,
100% {
-webkit-transform: scaleY(0.4)
}
20% {
-webkit-transform: scaleY(1.0)
}
}

@keyframes sk-stretchdelay {
0%,
40%,
100% {
transform: scaleY(0.4);
-webkit-transform: scaleY(0.4);
}
20% {
transform: scaleY(1.0);
-webkit-transform: scaleY(1.0);
}
}
/* Alert box */

.alert {
display: none;
position: fixed;
width: 100%;
z-index: 1001;
padding: 15px;
border: 1px solid transparent;
border-radius: 4px;
text-align: center;
font-weight: bold;
}

.alert-success {
color: #3c763d;
background-color: #dff0d8;
border-color: #d6e9c6;
}

.alert-info {
color: #31708f;
background-color: #d9edf7;
border-color: #bce8f1;
}

.alert-warning {
color: #8a6d3b;
background-color: #fcf8e3;
border-color: #faebcc;
}

.alert-danger {
color: #a94442;
background-color: #f2dede;
border-color: #ebccd1;
}

.pusher {
width: calc(100% - 260px);
box-sizing: border-box;
}
/* Modal (background) */

#imageModal {
display: none;
position: fixed;
z-index: 1;
left: 0;
top: 0;
width: 100%;
height: 100%;
overflow: auto;
background-color: rgb(0, 0, 0);
background-color: rgba(0, 0, 0, 0.4);
}
/* Modal content */

.modal-content {
background-color: #fefefe;
margin: 15% auto;
padding: 20px;
border: 1px solid #888;
width: 30%;
}
/* Close button */

.close {
color: #aaa;
float: right;
font-size: 28px;
font-weight: bold;
}

.close:hover,
.close:focus {
color: black;
text-decoration: none;
cursor: pointer;
}

.dis {
margin-bottom: 20px;
}

.disabled {
cursor: pointer;
pointer-events: none;
}
</style>

<!-- Loading overlay -->
<div id="mask">
<div id="loadingPage">
<div class="rect1"></div>
<div class="rect2"></div>
<div class="rect3"></div>
<div class="rect4"></div>
<div class="rect5"></div>
</div>
</div>

<!-- Alert box -->
<div class="alert"></div>

<div class="repository release dataset-list view">
{{template "repo/header" .}}
<!-- List container -->
<div class="ui container">

<!-- Title and the "new task" button -->
<div class="ui three column stack able grid">
<div class="column">
<h2>{{.i18n.Tr "repo.cloudbrain2"}}</h2>
</div>

<div class="column">
</div>

<div class="column right aligned">
{{if .Permission.CanWrite $.UnitTypeCloudBrain}}
<a class="ui green button" href="{{.RepoLink}}/modelarts/create">{{.i18n.Tr "repo.cloudbrain.new"}}</a> {{end}}
</div>
</div>

<!-- Divider -->
<div class="ui divider"></div>

<!-- Task list area -->
<div class="ui grid">
<div class="row">
<div class="ui sixteen wide column">

<!-- Sort controls -->
<div class="ui sixteen wide column">
<div class="ui two column stackable grid">
<div class="column">
</div>
<div class="column right aligned">
<div class="ui right dropdown type jump item">
<span class="text">
{{.i18n.Tr "repo.issues.filter_sort"}}<i class="dropdown icon"></i>
</span>
</div>
</div>
</div>
</div>

<!-- Task rows -->
<div class="dataset list">
{{range .Tasks}}
<div class="ui grid stackable item">
<div class="row">

<!-- Task name -->
<div class="four wide column">
<a class="title" href="{{$.Link}}/{{.JobID}}">
<span class="fitted">{{svg "octicon-tasklist" 16}}</span>
<span class="fitted">{{.JobName}}</span>
</a>
</div>

<!-- Task status -->
<div class="three wide column job-status" id="{{.JobID}}" data-repopath="{{$.RepoRelPath}}" data-jobid="{{.JobID}}">
{{.Status}}
</div>

<!-- Task creation time -->
<div class="three wide column">
<span class="ui text center">{{svg "octicon-flame" 16}} {{TimeSinceUnix .CreatedUnix $.Lang}}</span>
</div>

<!-- View -->
<div class="one wide column">
<span class="ui text clipboard">
<a class="title" href="{{$.Link}}/{{.JobID}}">
<span class="fitted">查看</span>
</a>
</span>
</div>

<!-- Delete task -->
<div class="one wide column">
<div class="ui text center clipboard">
<form id="delForm-{{.JobID}}" action="{{if ne .Status "STOPPED"}}javascript:void(0){{else}}{{$.Link}}/{{.JobID}}/del{{end}}" method="post">
{{$.CsrfTokenHtml}}
<a class="fitted" onclick="assertDelete(this)" style="{{if ne .Status "STOPPED"}}color:#CCCCCC{{end}}; font-size:16px; font-weight:bold">删除</a>
</form>
</div>
</div>

<!-- Debug -->
<div class="one wide column">
<div class="ui text center clipboard">
<a class="title" onclick="stop(this)" href="{{if not .CanDebug}}javascript:void(0){{else}}{{$.Link}}/{{.JobID}}/debug{{end}}" style="{{if not .CanDebug}}color:#CCCCCC{{end}}">
<span class="fitted">调试</span>
</a>
</div>
</div>

<!-- Stop -->
<div class="one wide column">
<div class="ui text center clipboard">
<form id="stopForm-{{.JobID}}" action="{{if ne .Status "RUNNING"}}javascript:void(0){{else}}{{$.Link}}/{{.JobID}}/stop{{end}}" method="post">
{{$.CsrfTokenHtml}}
<a class="fitted" onclick="document.getElementById('stopForm-{{.JobID}}').submit();" style="{{if ne .Status "RUNNING"}}color:#CCCCCC{{end}}; font-size:16px; font-weight:bold">停止</a>
</form>
</div>
</div>

<!-- Commit-image modal -->
<div id="imageModal" class="modal" style="display: none;">
<div class="modal-content">
<span class="close">&times;</span>

<!-- Form -->
<form id="commitImageForm" action="{{$.Link}}/{{.JobID}}/commit_image" method="post" target="iframeContent">
{{$.CsrfTokenHtml}}
<p>提交任务镜像</p>
<div class="ui divider"></div>

<div class="inline required field dis">
<label>镜像标签:</label>
<input name="tag" id="image_tag" tabindex="3" autofocus required maxlength="255" style="width:75%">
</div>

<div class="inline required field" style="position:relative;height:180px;">
<div style="height:20px;width:75px;">
<label>镜像描述:</label>
</div>
<div style="position:absolute;left:75px;top:0;width:75%">
<textarea name="description" rows="10" style="width:100%"></textarea>
</div>
</div>

<div class="ui divider"></div>

<div class="inline field">
<label></label>
<button class="ui green button" onclick="showmask()">
{{$.i18n.Tr "repo.cloudbrain.commit_image"}}
</button>
</div>
</form>
</div>
</div>

</div>
</div>
{{end}} {{template "base/paginate" .}}
</div>

</div>
</div>
</div>

</div>

</div>
</div>

</div>

<!-- Confirmation modal -->
<div id="deletemodel">
<div class="ui basic modal">
<div class="ui icon header">
<i class="trash icon"></i> 删除任务
</div>

<div class="content">
<p>你确认删除该任务么?此任务一旦删除不可恢复。</p>
</div>
<div class="actions">
<div class="ui red basic inverted cancel button">
<i class="remove icon"></i> 取消操作
</div>
<div class="ui green basic inverted ok button">
<i class="checkmark icon"></i> 确定操作
</div>
</div>
</div>
</div>

</div>
{{template "base/footer" .}}

<script>
// Debug and scoring open in a new window
function stop(obj) {
if (obj.style.color != "rgb(204, 204, 204)") {
obj.target = '_blank'
} else {
return
}
}

// Ask the user to confirm before deleting
function assertDelete(obj) {
if (obj.style.color == "rgb(204, 204, 204)") {
return
} else {
var delId = obj.parentNode.id
flag = 1;
$('.ui.basic.modal')
.modal({
onDeny: function() {
flag = false
},
onApprove: function() {
document.getElementById(delId).submit()
flag = true
},
onHidden: function() {
if (flag == false) {
$('.alert').html('您已取消操作').removeClass('alert-success').addClass('alert-danger').show().delay(1500).fadeOut();
}
}
})
.modal('show')
}
}

// Refresh each task's status on page load
$(document).ready(function() {
$(".job-status").each((index, job) => {
const jobID = job.dataset.jobid;
const repoPath = job.dataset.repopath;
if (job.textContent.trim() == 'STOPPED') {
return
}

$.get(`/api/v1/repos/${repoPath}/modelarts/${jobID}`, (data) => {
const jobID = data.JobID
const status = data.JobStatus
$('#' + jobID).text(status)
// console.log(data)
}).fail(function(err) {
console.log(err);
});
});
});

// Grab the image modal
var modal = document.getElementById('imageModal');

// Buttons that open the modal
var btns = document.getElementsByClassName("imageBtn");

// The <span> elements used to close the modal
var spans = document.getElementsByClassName('close');

// Open the modal on button click
for (i = 0; i < btns.length; i++) {
btns[i].onclick = function() {
modal.style.display = "block";
}
}

// Close the modal when the (x) span is clicked
for (i = 0; i < spans.length; i++) {
spans[i].onclick = function() {
modal.style.display = "none";
}
}

// Close the modal when the user clicks anywhere else
window.onclick = function(event) {
if (event.target == modal) {
modal.style.display = "none";
}
}

// Show the loading mask, then surface the result of the submit
function showmask() {
$('#imageModal').css('display', 'none')
$('#mask').css('display', 'block')

$("iframe[name=iframeContent]").on("load", function() {  
var responseText = $("iframe")[0].contentDocument.body.getElementsByTagName("pre")[0].innerHTML; 
var json1 = JSON.parse(responseText)
$('#mask').css('display', 'none')
parent.location.href

if (json1.result_code === "0") {
$('.alert').html('操作成功!').removeClass('alert-danger').addClass('alert-success').show().delay(1500).fadeOut();
} else {
$('.alert').html(json1.error_msg).removeClass('alert-success').addClass('alert-danger').show().delay(5000).fadeOut();
}
})
}
</script>

+ 184
- 0
templates/repo/modelarts/new.tmpl View File

@@ -0,0 +1,184 @@
{{template "base/head" .}}
<style>
/* Loading mask */
#mask {
position: fixed;
top: 0px;
left: 0px;
right: 0px;
bottom: 0px;
filter: alpha(opacity=60);
background-color: #777;
z-index: 1000;
display: none;
opacity: 0.8;
-moz-opacity: 0.5;
padding-top: 100px;
color: #000000
}
/* Loading spinner */
#loadingPage {
margin: 200px auto;
width: 50px;
height: 40px;
text-align: center;
font-size: 10px;
display: block;
}
#loadingPage>div {
background-color: green;
height: 100%;
width: 6px;
display: inline-block;
-webkit-animation: sk-stretchdelay 1.2s infinite ease-in-out;
animation: sk-stretchdelay 1.2s infinite ease-in-out;
}
#loadingPage .rect2 {
-webkit-animation-delay: -1.1s;
animation-delay: -1.1s;
}
#loadingPage .rect3 {
-webkit-animation-delay: -1.0s;
animation-delay: -1.0s;
}
#loadingPage .rect4 {
-webkit-animation-delay: -0.9s;
animation-delay: -0.9s;
}
#loadingPage .rect5 {
-webkit-animation-delay: -0.8s;
animation-delay: -0.8s;
}
@-webkit-keyframes sk-stretchdelay {
0%,
40%,
100% {
-webkit-transform: scaleY(0.4)
}
20% {
-webkit-transform: scaleY(1.0)
}
}
@keyframes sk-stretchdelay {
0%,
40%,
100% {
transform: scaleY(0.4);
-webkit-transform: scaleY(0.4);
}
20% {
transform: scaleY(1.0);
-webkit-transform: scaleY(1.0);
}
}
.inline.required.field.cloudbrain_benchmark {
display: none;
}
</style>

<div id="mask">
<div id="loadingPage">
<div class="rect1"></div>
<div class="rect2"></div>
<div class="rect3"></div>
<div class="rect4"></div>
<div class="rect5"></div>
</div>
</div>
<div class="repository">
{{template "repo/header" .}}
<div class="repository new repo ui middle very relaxed page grid">
<div class="column">
{{template "base/alert" .}}
<form class="ui form" action="{{.Link}}" method="post">
{{.CsrfTokenHtml}}
<h3 class="ui top attached header">
{{.i18n.Tr "repo.cloudbrain.new"}}
</h3>
<div class="ui attached segment">
<!-- <br> -->
<div class="inline required field">
<label>任务名称</label>
<input name="job_name" id="cloudbrain_job_name" placeholder="任务名称" value="{{.job_name}}" tabindex="3" autofocus required maxlength="255">
</div>

<div class="inline required field">
<label>数据集</label>
<select id="cloudbrain_dataset" class="ui search dropdown" placeholder="选择数据集" style='width:385px' name="attachment">
{{range .attachments}}
<option name="attachment" value="{{.UUID}}">{{.Attachment.Name}}</option>

{{end}}
</select>
</div>

<div class="inline required field">
<label>工作环境</label>
<input name="de" id="cloudbrain_de" value="{{.env}}" tabindex="3" autofocus required maxlength="255" readonly="readonly">
</div>
<div class="inline required field">
<label>类型</label>
<input name="job_type" id="cloudbrain_job_type" value="{{.notebook_type}}" tabindex="3" autofocus required maxlength="255" readonly="readonly">
</div>
<div class="inline required field">
<label>规格</label>
<input name="flavor" id="cloudbrain_flavor" value="{{.flavor}}" tabindex="3" autofocus required maxlength="255" readonly="readonly">
</div>
<div class="inline required field">
<label>数据集存放路径</label>
<input name="dataset_path" id="cloudbrain_dataset_path" value="{{.dataset_path}}" tabindex="3" autofocus required maxlength="255" readonly="readonly">
</div>
<div class="inline field">
<label>描述</label>
<input name="description" id="cloudbrain_description" tabindex="3" autofocus maxlength="255">
</div>
<div class="inline field">
<label></label>
<button class="ui green button" onclick="showmask()">
{{.i18n.Tr "repo.cloudbrain.new"}}
</button>
<a class="ui button" href="/">{{.i18n.Tr "repo.cloudbrain.cancel"}}</a>
</div>
</div>
</form>
</div>
</div>
</div>
{{template "base/footer" .}}

<script>
// Show the mask when the button is clicked
function showmask() {
document.getElementById("mask").style.display = "block"
}

// Hide the mask once the page has finished loading
document.onreadystatechange = function() {
if (document.readyState === "complete") {
document.getElementById("mask").style.display = "none"
}
}

$('select.dropdown')
.dropdown();

$(function() {
$("#cloudbrain_job_type").change(function() {
if ($(this).val() == 'BENCHMARK') {
$(".cloudbrain_benchmark").show();
} else {
$(".cloudbrain_benchmark").hide();
}
})
})
</script>

+ 122
- 0
templates/repo/modelarts/show.tmpl View File

@@ -0,0 +1,122 @@
{{template "base/head" .}}
<div class="repository">
{{template "repo/header" .}}
<div class="repository new repo ui middle very relaxed page grid">
<div class="column">
{{template "base/alert" .}}

<h4 class="ui header" id="vertical-segment">
<a href="javascript:window.history.back();"><i class="arrow left icon"></i>返回</a>
</h4>
<div>
<div class="ui yellow segment">
{{with .task}}
<p>任务名称: {{.JobName}}</p>
{{end}}
</div>
<div class="ui green segment">
<p>任务结果:</p>
{{with .result}}
<table class="ui celled striped table">
<tbody>
<tr>
<td class="four wide"> 状态 </td>
<td> {{.Status}} </td>
</tr>
<tr>
<td> 开始时间 </td>
<td>{{.CreateTime}}</td>
</tr>
<tr>
<td> 最后更新时间 </td>
<td>{{.LatestUpdateTime}}</td>
</tr>
</tbody>
</table>
{{end}}
</div>
<div class="ui blue segment">
{{with .result}}
<table class="ui celled striped table">
<thead>
<tr> <th colspan="2"> 配置信息 </th> </tr>
</thead>
<tbody>
<tr>
<td class="four wide"> 开发环境类型 </td>
<td>{{.Profile.DeType}}</td>
</tr>
<tr>
<td> 硬件类型 </td>
<td>{{.Profile.FlavorType}}</td>
</tr>
</tbody>
</table>

<table class="ui celled striped table">
<thead>
<tr> <th colspan="2"> 机器规格详情 </th> </tr>
</thead>
<tbody>
<tr>
<td class="four wide"> 机器规格 </td>
<td> {{.Flavor}} </td>
</tr>
<tr>
<td> 规格名称 </td>
<td>{{.FlavorDetails.Name}}</td>
</tr>
<tr>
<td> 规格销售状态 </td>
<td>{{.FlavorDetails.Status}}</td>
</tr>
<tr>
<td> 排队个数 </td>
<td>{{.FlavorDetails.QueuingNum}}</td>
</tr>
<tr>
<td> 排到队的剩余时间(秒) </td>
<td>{{.FlavorDetails.QueueLeftTime}}</td>
</tr>
<tr>
<td> 自动停止时间(秒) </td>
<td>{{.FlavorDetails.Duration}}</td>
</tr>
</tbody>
</table>

<table class="ui celled striped table" {{if eq .QueuingInfo.RemainTime 0}}hidden{{end}}>
<thead>
<tr> <th colspan="2"> 排队信息 </th> </tr>
</thead>
<tbody>
<tr>
<td> 实例状态 </td>
<td>{{.QueuingInfo.Status}}</td>
</tr>
<tr>
<td> 实例排队的开始时间 </td>
<td>{{.QueuingInfo.BeginTime}}</td>
</tr>
<tr>
<td> 排到队的剩余时间(秒) </td>
<td>{{.QueuingInfo.RemainTime}}</td>
</tr>
<tr>
<td> 实例排队的预计停止时间 </td>
<td>{{.QueuingInfo.EndTime}}</td>
</tr>
<tr>
<td> 实例在队列中的排位 </td>
<td>{{.QueuingInfo.Rank}}</td>
</tr>
</tbody>
</table>
{{end}}
</div>
</div>

</div>
</div>
</div>
{{template "base/footer" .}}

+ 3
- 2
web_src/js/components/MinioUploader.vue View File

@@ -245,7 +245,8 @@ export default {
           file_name: file.name,
           size: file.size,
           dataset_id: file.datasetId,
-          _csrf: csrf
+          _csrf: csrf,
+          type: 0
         })
       );
     }
@@ -450,4 +451,4 @@
   border-bottom: 1px solid #dadce0;
   min-height: 0;
 }
-</style>
\ No newline at end of file
+</style>
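Side note on the new type field (an inference from this merge, not stated anywhere in it explicitly): MinioUploader now posts type 0 to /attachments/add, while the new ObsUploader below posts type 1, so the backend can record which storage backend holds each attachment. A minimal sketch of the shared payload; the helper name is hypothetical:

// Hypothetical helper mirroring the payload both uploaders send to /attachments/add.
// type 0 = Minio-backed attachment, type 1 = OBS-backed attachment.
function attachmentPayload(uuid, file, datasetId, csrf, storageType) {
  return {
    uuid: uuid,
    file_name: file.name,
    size: file.size,
    dataset_id: datasetId,
    _csrf: csrf,
    type: storageType
  };
}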

+ 298
- 0
web_src/js/components/ObsUploader.vue View File

@@ -0,0 +1,298 @@
<template>
<div class="dropzone-wrapper dataset-files">
<div
id="dataset"
class="dropzone"
/>

<p class="upload-info">
{{ file_status_text }}
<span class="success">{{ status }}</span>
</p>
</div>
</template>

<script>
/* eslint-disable eqeqeq */
// import Dropzone from 'dropzone/dist/dropzone.js';
// import 'dropzone/dist/dropzone.css'
import createDropzone from '../features/dropzone.js';
import ObsClient from 'esdk-obs-browserjs';

const {_AppSubUrl, _StaticUrlPrefix, csrf} = window.config;

export default {
data() {
return {
dropzoneUploader: null,
maxFiles: 1,
maxFilesize: 1 * 1024 * 1024 * 1024 * 1024, // huge cap; in practice no client-side size limit
acceptedFiles: '*/*',
progress: 0,
status: '',
dropzoneParams: {},
file_status_text: ''
};
},

async mounted() {
this.dropzoneParams = $('div#minioUploader-params'); // reuse MinioUploader's params div for the status strings
this.file_status_text = this.dropzoneParams.data('file-status');
this.status = this.dropzoneParams.data('file-init-status');

let previewTemplate = '';
previewTemplate += '<div class="dz-preview dz-file-preview">\n ';
previewTemplate += ' <div class="dz-details">\n ';
previewTemplate += ' <div class="dz-filename">';
previewTemplate +=
' <span data-dz-name data-dz-thumbnail></span>';
previewTemplate += ' </div>\n ';
previewTemplate += ' <div class="dz-size" data-dz-size></div>\n ';
previewTemplate += ' </div>\n ';
previewTemplate += ' <div class="dz-progress ui active progress">';
previewTemplate +=
' <div class="dz-upload bar" data-dz-uploadprogress><div class="progress"></div></div>\n ';
previewTemplate += ' </div>\n ';
previewTemplate += ' <div class="dz-success-mark">';
previewTemplate += ' <span>上传成功</span>';
previewTemplate += ' </div>\n ';
previewTemplate += ' <div class="dz-error-mark">';
previewTemplate += ' <span>上传失败</span>';
previewTemplate += ' </div>\n ';
previewTemplate += ' <div class="dz-error-message">';
previewTemplate += ' <span data-dz-errormessage></span>';
previewTemplate += ' </div>\n';
previewTemplate += '</div>';

const $dropzone = $('div#dataset');
console.log('createDropzone');

const dropzoneUploader = await createDropzone($dropzone[0], {
url: '/todouploader', // placeholder; files are uploaded via the OBS SDK, not by Dropzone itself
maxFiles: this.maxFiles,
maxFilesize: this.maxFilesize, // fixed typo: was this.maxFileSize, which is undefined
timeout: 0,
autoQueue: false,
dictDefaultMessage: this.dropzoneParams.data('default-message'),
dictInvalidFileType: this.dropzoneParams.data('invalid-input-type'),
dictFileTooBig: this.dropzoneParams.data('file-too-big'),
dictRemoveFile: this.dropzoneParams.data('remove-file'),
previewTemplate,
});

// When a file is added to the list, start uploading it
dropzoneUploader.on('addedfile', (file) => {
if(file.status == 'added'){
this.onFileAdded(file)
}
});

// Only one file at a time: the current file must finish uploading before it can be replaced.
// Arrow function keeps `this` bound to the component (the original `function` form made
// `this.dropzoneParams` resolve against the Dropzone instance, where it is undefined).
dropzoneUploader.on('maxfilesexceeded', (file) => {
if (dropzoneUploader.files[0].status !== 'success') {
alert(this.dropzoneParams.data('waitting-uploading'));
dropzoneUploader.removeFile(file);
return;
}
dropzoneUploader.removeAllFiles();
dropzoneUploader.addFile(file);
});

this.dropzoneUploader = dropzoneUploader;
},

methods: {
resetStatus() {
this.progress = 0;
this.status = '';
},
updateProgress(file, progress) {
file.previewTemplate.querySelector(
'.dz-upload'
).style.width = `${progress}%`;
},
emitDropzoneSuccess(file) {
file.status = 'success';
this.dropzoneUploader.emit('success', file);
this.dropzoneUploader.emit('complete', file);
},
emitDropzoneFailed(file) {
this.status = this.dropzoneParams.data('falied');
file.status = 'error';
this.dropzoneUploader.emit('error', file);
},

onFileAdded(file) {
this.resetStatus();
this.status = this.dropzoneParams.data('obs-connecting');
this.do_multi_uploader(file)
},

// Fetch the OBS key and uuid for the new attachment (synchronous on purpose: the credentials must exist before the upload starts)
get_result(){
var res
$.ajax({
url: '/attachments/get_obs_key',
type: 'GET',
async: false,
success: function(result){
res = result
}
});
return res
},

// Build an ObsClient from the server-issued credentials
getObsClient(result){
return new ObsClient({
access_key_id: result.access_key_id,
secret_access_key: result.secret_access_key,
server : result.server
});
},

// Resumable upload with checkpointing
do_multi_uploader(file){
const result = this.get_result()
const upload_datasetId = document
.getElementById('datasetId')
.getAttribute('datasetId');
const obsClient = this.getObsClient(result)
const _this = this
var cp;
var hook;

obsClient.uploadFile({
Bucket : result.bucket,
Key : result.key,
SourceFile : file,
PartSize : 64 * 1024 * 1024,

// Update the progress bar
ProgressCallback : function(transferredAmount, totalAmount, totalSeconds){
_this.updateProgress(file, ((transferredAmount / totalAmount) * 100).toFixed(2))
_this.status = `${_this.dropzoneParams.data('uploading')} ${(
(transferredAmount / totalAmount) *
100
).toFixed(2)}%`;
},

// Watch the upload for its final result
EventCallback : function(eventType, eventParam, eventResult){
console.log("eventType1= ", eventType)
console.log("eventParam1= ", eventParam)
console.log("eventResult1= ", eventResult)
// The upload completed successfully
if(eventType == 'completeMultipartUploadSucceed'){
console.log("file = ", file)
$.ajax({
url: '/attachments/add',
type: 'POST',
data: {
'uuid': result.uuid,
'file_name': file.name,
'size': file.size,
'dataset_id': upload_datasetId,
'_csrf': csrf,
'type': 1
},
async: false,
success: function (data) {
_this.progress = 100;
_this.status = _this.dropzoneParams.data('upload-complete');
_this.emitDropzoneSuccess(file)
setTimeout(() => {
window.location.reload();
}, 1000);
},
error: function(){
_this.emitDropzoneFailed(file)
}
});
}
},
ResumeCallback : function(resumeHook, uploadCheckpoint){
hook = resumeHook;
cp = uploadCheckpoint;
}
}, function(err, uploadResult){ // param renamed from `result`: the credentials object above must stay visible in the retry below
// On error, call the resumable-upload API again so the task continues from the checkpoint
if(err){
obsClient.uploadFile({
UploadCheckpoint : cp,

// Keep updating the progress bar while the resumed upload runs
ProgressCallback : function(transferredAmount, totalAmount, totalSeconds){
_this.updateProgress(file, ((transferredAmount / totalAmount) * 100).toFixed(2))
_this.status = `${_this.dropzoneParams.data('uploading')} ${(
(transferredAmount / totalAmount) *
100
).toFixed(2)}%`;
},

// Watch the resumed upload for its final result
EventCallback : function(eventType, eventParam, eventResult){
console.log("eventType2= ", eventType)
console.log("eventParam2= ", eventParam)
console.log("eventResult2= ", eventResult)
// The resumed upload completed successfully
if(eventType == 'completeMultipartUploadSucceed'){
$.ajax({
url: '/attachments/add',
type: 'POST',
data: {
'uuid': result.uuid,
'file_name': file.name,
'size': file.size,
'dataset_id': upload_datasetId,
'_csrf': csrf,
'type': 1
},
async: false,
success: function (data) {
_this.progress = 100;
_this.status = _this.dropzoneParams.data('upload-complete');
_this.emitDropzoneSuccess(file)
setTimeout(() => {
window.location.reload();
}, 1000);
console.log(data)
},
error: function(){
_this.emitDropzoneFailed(file)
}
});
}
if (eventType == 'uploadPartFailed'){
_this.emitDropzoneFailed(file)
}
}
});
}
});
},
}
};
</script>

<style>
.dropzone-wrapper {
margin: 2em auto;
}
.ui .dropzone {
border: 2px dashed #0087f5;
box-shadow: none !important;
padding: 0;
min-height: 5rem;
border-radius: 4px;
}
.dataset .dataset-files #dataset .dz-preview.dz-file-preview,
.dataset .dataset-files #dataset .dz-preview.dz-processing {
display: flex;
align-items: center;
}
.dataset .dataset-files #dataset .dz-preview {
border-bottom: 1px solid #dadce0;
min-height: 0;
}
</style>
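The core of ObsUploader is the resumable-upload flow of esdk-obs-browserjs: uploadFile hands out a checkpoint through ResumeCallback, and after a failure the same call is repeated with UploadCheckpoint so parts that already arrived are not re-sent. A stripped-down sketch of that pattern, assuming obsClient and file are already set up as in the component; Bucket and Key are placeholders, not values from this repository:

var checkpoint;
obsClient.uploadFile({
  Bucket: 'example-bucket',          // placeholder bucket name
  Key: 'example/object/key',         // placeholder object key
  SourceFile: file,                  // a browser File object
  PartSize: 64 * 1024 * 1024,        // 64 MB parts, matching the component
  ResumeCallback: function (resumeHook, uploadCheckpoint) {
    checkpoint = uploadCheckpoint;   // remember progress for a possible retry
  }
}, function (err) {
  if (err) {
    // Retry from the checkpoint: already-uploaded parts are skipped.
    obsClient.uploadFile({ UploadCheckpoint: checkpoint }, function (retryErr) {
      if (retryErr) console.error('resumed upload failed as well', retryErr);
    });
  }
});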

+ 17
- 0
web_src/js/index.js View File

@@ -29,6 +29,7 @@ import {
 } from './features/notification.js';
 import {createCodeEditor} from './features/codeeditor.js';
 import MinioUploader from './components/MinioUploader.vue';
+import ObsUploader from './components/ObsUploader.vue';

 const {AppSubUrl, StaticUrlPrefix, csrf} = window.config;

@@ -2955,6 +2956,7 @@ $(document).ready(async () => {
   initCodeView();
   initVueApp();
   initVueUploader();
+  initObsUploader();
   initTeamSettings();
   initCtrlEnterSubmit();
   initNavbarContentToggle();
@@ -3641,6 +3643,21 @@ function initVueUploader() {
   });
 }

+// Newly added: mount the OBS uploader on pages that render its anchor element
+function initObsUploader() {
+  const el = document.getElementById('obsUploader');
+  if (!el) {
+    return;
+  }
+
+  new Vue({
+    el: '#obsUploader',
+    components: {ObsUploader},
+    template: '<ObsUploader />'
+  });
+}
+
 window.timeAddManual = function () {
   $('.mini.modal')
     .modal({
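initObsUploader follows the same lazy-mount pattern as initVueUploader: the Vue instance is created only on pages that actually render the anchor element, so every other page pays nothing. The same idea as a generic helper (hypothetical, not part of this merge):

// Hypothetical generalization of the lazy-mount pattern above.
function mountIfPresent(selector, component) {
  const el = document.querySelector(selector);
  if (!el) {
    return; // this page doesn't use the component
  }
  new Vue({
    el: selector,
    components: {Widget: component},
    template: '<Widget />'
  });
}
// e.g. mountIfPresent('#obsUploader', ObsUploader);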


+ 4
- 1
webpack.config.js View File

@@ -44,6 +44,9 @@ module.exports = {
   filename: 'js/[name].js',
   chunkFilename: 'js/[name].js',
 },
+node: {
+  fs: 'empty'
+},
 optimization: {
   minimize: isProduction,
   minimizer: [
@@ -237,7 +240,7 @@ module.exports = {
   }),
   new MonacoWebpackPlugin({
     filename: 'js/monaco-[name].worker.js',
-  }),
+  })
 ],
 performance: {
   hints: false,
