diff --git a/custom/conf/app.ini.sample b/custom/conf/app.ini.sample
index 13a3d4aeb..a027624f4 100755
--- a/custom/conf/app.ini.sample
+++ b/custom/conf/app.ini.sample
@@ -1069,3 +1069,19 @@ PASSWORD = 4BPmgvK2hb2Eywwyp4YZRY4B7yQf4DAC
 [blockchain]
 HOST = http://192.168.207.84:3002/
 COMMIT_VALID_DATE = 2021-01-15
+
+[obs]
+ENDPOINT = https://obs.cn-south-222.ai.pcl.cn
+ACCESS_KEY_ID = FDP3LRMHLB9S77VWEHE3
+SECRET_ACCESS_KEY = LyM82Wk80pgjhs2z7AdDcsdpCWhbsJtSzQ7hkESN
+BUCKET = testopendata
+LOCATION = cn-south-222
+BASE_PATH = attachment/
+
+[modelarts]
+ENDPOINT = https://modelarts.cn-south-222.ai.pcl.cn
+PROJECT_ID = edfccf24aace4e17a56da6bcbb55a5aa
+PROJECT_NAME = cn-south-222_test
+USERNAME = test1
+PASSWORD = Qizhi@test.
+DOMAIN = cn-south-222
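Reviewer note: the new `[obs]` and `[modelarts]` keys surface in code as `setting.*` fields (`setting.Bucket`, `setting.BasePath`, `setting.ModelArtsHost`, `setting.ProjectID`, `setting.ProjectName`, `setting.ModelArtsUsername`, `setting.ModelArtsPassword`, `setting.ModelArtsDomain` are all referenced further down). The `modules/setting` change itself is not part of this diff, so the sketch below of how the keys would be mapped with gitea's go-ini `Cfg` is an assumption, with all field names hypothetical. Note also that `setting.IamHost`, used by `getToken` in `modules/modelarts/resty.go` below, has no key in this sample yet.

```go
// Hypothetical sketch only: the actual modules/setting loader is not in this diff.
func newModelArtsService() {
	sec := Cfg.Section("modelarts") // Cfg is the ini.File gitea has already loaded
	ModelArtsHost = sec.Key("ENDPOINT").MustString("")
	ProjectID = sec.Key("PROJECT_ID").MustString("")
	ProjectName = sec.Key("PROJECT_NAME").MustString("")
	ModelArtsUsername = sec.Key("USERNAME").MustString("")
	ModelArtsPassword = sec.Key("PASSWORD").MustString("")
	ModelArtsDomain = sec.Key("DOMAIN").MustString("")
}

func newOBSService() {
	sec := Cfg.Section("obs")
	Endpoint = sec.Key("ENDPOINT").MustString("")
	AccessKeyID = sec.Key("ACCESS_KEY_ID").MustString("")
	SecretAccessKey = sec.Key("SECRET_ACCESS_KEY").MustString("")
	Bucket = sec.Key("BUCKET").MustString("")
	BasePath = sec.Key("BASE_PATH").MustString("attachment/")
}
```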
and attachment.type = ?", DecompressStateDone, userID, TypeCloudBrainOne).Find(&attachments); err != nil { return nil, err } return attachments, nil } -/* -func GetAllUserAttachments(userID int64) ([]*AttachmentUsername, error) { - attachsPub, err := getAllPublicAttachments(x) - if err != nil { - log.Error("getAllPublicAttachments failed:%v", err) - return nil, err - } - - attachsPri, err := getPrivateAttachments(x, userID) - if err != nil { - log.Error("getPrivateAttachments failed:%v", err) +func getAllUserAttachments(e Engine, userID int64) ([]*AttachmentUsername, error) { + attachments := make([]*AttachmentUsername, 0, 10) + if err := e.Table("attachment").Join("LEFT", "`user`", "attachment.uploader_id "+ + "= `user`.id").Where("decompress_state= ? and attachment.type = ? and (uploader_id= ? or is_private = ?)", DecompressStateDone, TypeCloudBrainOne, userID, false).Find(&attachments); err != nil { return nil, err } - - return append(attachsPub, attachsPri...), nil + return attachments, nil } -*/ +func GetAllUserAttachments(userID int64) ([]*AttachmentUsername, error) { + return getAllUserAttachments(x, userID) +} -func getAllUserAttachments(e Engine, userID int64) ([]*AttachmentUsername, error) { +func getModelArtsUserAttachments(e Engine, userID int64) ([]*AttachmentUsername, error) { attachments := make([]*AttachmentUsername, 0, 10) if err := e.Table("attachment").Join("LEFT", "`user`", "attachment.uploader_id "+ - "= `user`.id").Where("decompress_state= ? and (uploader_id= ? or is_private = ?)", DecompressStateDone, userID, false).Find(&attachments); err != nil { + "= `user`.id").Where("attachment.type = ? and (uploader_id= ? or is_private = ?)", TypeCloudBrainTwo, userID, false).Find(&attachments); err != nil { return nil, err } return attachments, nil } -func GetAllUserAttachments(userID int64) ([]*AttachmentUsername, error) { - return getAllUserAttachments(x, userID) +func GetModelArtsUserAttachments(userID int64) ([]*AttachmentUsername, error) { + return getModelArtsUserAttachments(x, userID) } diff --git a/models/cloudbrain.go b/models/cloudbrain.go index 592107a46..036c25264 100755 --- a/models/cloudbrain.go +++ b/models/cloudbrain.go @@ -14,6 +14,7 @@ import ( type CloudbrainStatus string type JobType string +type ModelArtsJobStatus string const ( JobWaiting CloudbrainStatus = "WAITING" @@ -24,6 +25,22 @@ const ( JobTypeDebug JobType = "DEBUG" JobTypeBenchmark JobType = "BENCHMARK" + + ModelArtsCreateQueue ModelArtsJobStatus = "CREATE_QUEUING" //免费资源创建排队中 + ModelArtsCreating ModelArtsJobStatus = "CREATING" //创建中 + ModelArtsCreateFailed ModelArtsJobStatus = "CREATE_FAILED" //创建失败 + ModelArtsStartQueuing ModelArtsJobStatus = "START_QUEUING" //免费资源启动排队中 + ModelArtsReadyToStart ModelArtsJobStatus = "READY_TO_START" //免费资源等待启动 + ModelArtsStarting ModelArtsJobStatus = "STARTING" //启动中 + ModelArtsRestarting ModelArtsJobStatus = "RESTARTING" //重启中 + ModelArtsStartFailed ModelArtsJobStatus = "START_FAILED" //启动失败 + ModelArtsRunning ModelArtsJobStatus = "RUNNING" //运行中 + ModelArtsStopping ModelArtsJobStatus = "STOPPING" //停止中 + ModelArtsStopped ModelArtsJobStatus = "STOPPED" //停止 + ModelArtsUnavailable ModelArtsJobStatus = "UNAVAILABLE" //故障 + ModelArtsDeleted ModelArtsJobStatus = "DELETED" //已删除 + ModelArtsResizing ModelArtsJobStatus = "RESIZING" //规格变更中 + ModelArtsResizFailed ModelArtsJobStatus = "RESIZE_FAILED" //规格变更失败 ) type Cloudbrain struct { @@ -41,6 +58,7 @@ type Cloudbrain struct { UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` DeletedAt time.Time `xorm:"deleted"` 
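The new `type` column is what splits dataset attachments between the two backends: `TypeCloudBrainOne` for the existing CloudBrain cluster, `TypeCloudBrainTwo` for ModelArts (the constants are introduced in models/file_chunk.go below). A hypothetical caller, just to illustrate the routing; this helper is not part of the diff:

```go
// Hypothetical helper: pick the attachment list matching the target backend.
func attachmentsFor(userID int64, backend int) ([]*models.AttachmentUsername, error) {
	if backend == models.TypeCloudBrainTwo {
		// ModelArts/OBS attachments; note getModelArtsUserAttachments above does
		// not filter on decompress_state, unlike the CloudBrain-one query.
		return models.GetModelArtsUserAttachments(userID)
	}
	return models.GetAllUserAttachments(userID)
}
```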
diff --git a/models/cloudbrain.go b/models/cloudbrain.go
index 592107a46..036c25264 100755
--- a/models/cloudbrain.go
+++ b/models/cloudbrain.go
@@ -14,6 +14,7 @@ import (
 
 type CloudbrainStatus string
 type JobType string
+type ModelArtsJobStatus string
 
 const (
 	JobWaiting CloudbrainStatus = "WAITING"
@@ -24,6 +25,22 @@
 
 	JobTypeDebug     JobType = "DEBUG"
 	JobTypeBenchmark JobType = "BENCHMARK"
+
+	ModelArtsCreateQueue  ModelArtsJobStatus = "CREATE_QUEUING" // queued for creation (free resources)
+	ModelArtsCreating     ModelArtsJobStatus = "CREATING"       // creating
+	ModelArtsCreateFailed ModelArtsJobStatus = "CREATE_FAILED"  // creation failed
+	ModelArtsStartQueuing ModelArtsJobStatus = "START_QUEUING"  // queued for start (free resources)
+	ModelArtsReadyToStart ModelArtsJobStatus = "READY_TO_START" // waiting to start (free resources)
+	ModelArtsStarting     ModelArtsJobStatus = "STARTING"       // starting
+	ModelArtsRestarting   ModelArtsJobStatus = "RESTARTING"     // restarting
+	ModelArtsStartFailed  ModelArtsJobStatus = "START_FAILED"   // start failed
+	ModelArtsRunning      ModelArtsJobStatus = "RUNNING"        // running
+	ModelArtsStopping     ModelArtsJobStatus = "STOPPING"       // stopping
+	ModelArtsStopped      ModelArtsJobStatus = "STOPPED"        // stopped
+	ModelArtsUnavailable  ModelArtsJobStatus = "UNAVAILABLE"    // faulty
+	ModelArtsDeleted      ModelArtsJobStatus = "DELETED"        // deleted
+	ModelArtsResizing     ModelArtsJobStatus = "RESIZING"       // flavor change in progress
+	ModelArtsResizFailed  ModelArtsJobStatus = "RESIZE_FAILED"  // flavor change failed
 )
 
 type Cloudbrain struct {
@@ -41,6 +58,7 @@ type Cloudbrain struct {
 	UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
 	DeletedAt   time.Time          `xorm:"deleted"`
 	CanDebug    bool               `xorm:"-"`
+	Type        int                `xorm:"INDEX DEFAULT 0"`
 
 	User *User       `xorm:"-"`
 	Repo *Repository `xorm:"-"`
@@ -117,6 +135,7 @@ type CloudbrainsOptions struct {
 	SortType      string
 	CloudbrainIDs []int64
 	// JobStatus   CloudbrainStatus
+	Type int
 }
 type TaskPod struct {
 	TaskRoleStatus struct {
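The fifteen `ModelArtsJobStatus` values mirror the ModelArts notebook lifecycle. The status handling itself is not in this diff, so the grouping below is only an illustrative sketch of how the new constants relate:

```go
// Illustrative only: coarse buckets over the new ModelArts statuses.
func isModelArtsJobTransient(status ModelArtsJobStatus) bool {
	switch status {
	case ModelArtsCreateQueue, ModelArtsCreating, ModelArtsStartQueuing,
		ModelArtsReadyToStart, ModelArtsStarting, ModelArtsRestarting,
		ModelArtsStopping, ModelArtsResizing:
		return true
	}
	return false
}

func isModelArtsJobFailed(status ModelArtsJobStatus) bool {
	return status == ModelArtsCreateFailed || status == ModelArtsStartFailed ||
		status == ModelArtsResizFailed || status == ModelArtsUnavailable
}
```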
`json:"auth"` +} + +type Auth struct { + Identity Identity `json:"identity"` + Scope Scope `json:"scope"` +} + +type Scope struct { + Project Project `json:"project"` +} + +type Project struct { + Name string `json:"name"` +} + +type Identity struct { + Methods []string `json:"methods"` + Password Password `json:"password"` +} + +type Password struct { + User NotebookUser `json:"user"` +} + +type NotebookUser struct { + Name string `json:"name"` + Password string `json:"password"` + Domain Domain `json:"domain"` +} + +type Domain struct { + Name string `json:"name"` +} + +const ( + ActionStart = "start" + ActionStop = "stop" + ActionRestart = "restart" + ActionQueue = "queue" + ActionDequeue = "dequeue" +) + +type NotebookAction struct { + Action string `json:"action"` +} + +type NotebookActionResult struct { + ErrorCode string `json:"error_code"` + ErrorMsg string `json:"error_msg"` + CurrentStatus string `json:"current_status"` + PreviousState string `json:"previous_state"` +} + +type NotebookGetJobTokenResult struct { + ErrorCode string `json:"error_code"` + ErrorMsg string `json:"error_msg"` + Token string `json:"token"` +} + +type NotebookDelResult struct { + InstanceID string `json:"instance_id"` +} + func Cloudbrains(opts *CloudbrainsOptions) ([]*Cloudbrain, int64, error) { sess := x.NewSession() defer sess.Close() @@ -286,6 +480,12 @@ func Cloudbrains(opts *CloudbrainsOptions) ([]*Cloudbrain, int64, error) { ) } + if (opts.Type) >= 0 { + cond = cond.And( + builder.Eq{"cloudbrain.type": opts.Type}, + ) + } + // switch opts.JobStatus { // case JobWaiting: // cond.And(builder.Eq{"cloudbrain.status": int(JobWaiting)}) diff --git a/models/dataset.go b/models/dataset.go index 7e72d3e56..f6f2ef501 100755 --- a/models/dataset.go +++ b/models/dataset.go @@ -196,11 +196,11 @@ func (s datasetMetaSearch) Less(i, j int) bool { return s.ID[i] < s.ID[j] } -func GetDatasetAttachments(rels ...*Dataset) (err error) { - return getDatasetAttachments(x, rels...) +func GetDatasetAttachments(typeCloudBrain int ,rels ...*Dataset) (err error) { + return getDatasetAttachments(x, typeCloudBrain, rels...) } -func getDatasetAttachments(e Engine, rels ...*Dataset) (err error) { +func getDatasetAttachments(e Engine, typeCloudBrain int, rels ...*Dataset) (err error) { if len(rels) == 0 { return } @@ -223,6 +223,7 @@ func getDatasetAttachments(e Engine, rels ...*Dataset) (err error) { err = e. Asc("dataset_id"). In("dataset_id", sortedRels.ID). + And("type = ?", typeCloudBrain). 
diff --git a/models/dataset.go b/models/dataset.go
index 7e72d3e56..f6f2ef501 100755
--- a/models/dataset.go
+++ b/models/dataset.go
@@ -196,11 +196,11 @@ func (s datasetMetaSearch) Less(i, j int) bool {
 	return s.ID[i] < s.ID[j]
 }
 
-func GetDatasetAttachments(rels ...*Dataset) (err error) {
-	return getDatasetAttachments(x, rels...)
+func GetDatasetAttachments(typeCloudBrain int, rels ...*Dataset) (err error) {
+	return getDatasetAttachments(x, typeCloudBrain, rels...)
 }
 
-func getDatasetAttachments(e Engine, rels ...*Dataset) (err error) {
+func getDatasetAttachments(e Engine, typeCloudBrain int, rels ...*Dataset) (err error) {
 	if len(rels) == 0 {
 		return
 	}
@@ -223,6 +223,7 @@ func getDatasetAttachments(e Engine, rels ...*Dataset) (err error) {
 	err = e.
 		Asc("dataset_id").
 		In("dataset_id", sortedRels.ID).
+		And("type = ?", typeCloudBrain).
 		Find(&attachments, Attachment{})
 	if err != nil {
 		return err
diff --git a/models/file_chunk.go b/models/file_chunk.go
index 4eb379b01..3b79adc7c 100755
--- a/models/file_chunk.go
+++ b/models/file_chunk.go
@@ -10,6 +10,11 @@ const (
 	FileUploaded
 )
 
+const (
+	TypeCloudBrainOne = 0
+	TypeCloudBrainTwo = 1
+)
+
 type FileChunk struct {
 	ID          int64  `xorm:"pk autoincr"`
 	UUID        string `xorm:"uuid UNIQUE"`
@@ -19,7 +24,8 @@
 	TotalChunks int
 	Size        int64
 	UserID      int64 `xorm:"INDEX"`
-	CompletedParts []string `xorm:"DEFAULT """` // chunkNumber+etag eg: ,1-asqwewqe21312312.2-123hjkas
+	Type           int      `xorm:"INDEX DEFAULT 0"`
+	CompletedParts []string `xorm:"DEFAULT ''"` // chunkNumber+etag eg: ,1-asqwewqe21312312.2-123hjkas
 	CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
 	UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
 }
@@ -41,14 +47,14 @@ func getFileChunkByMD5(e Engine, md5 string) (*FileChunk, error) {
 }
 
 // GetFileChunkByMD5 returns fileChunk by given id
-func GetFileChunkByMD5AndUser(md5 string, userID int64) (*FileChunk, error) {
-	return getFileChunkByMD5AndUser(x, md5, userID)
+func GetFileChunkByMD5AndUser(md5 string, userID int64, typeCloudBrain int) (*FileChunk, error) {
+	return getFileChunkByMD5AndUser(x, md5, userID, typeCloudBrain)
 }
 
-func getFileChunkByMD5AndUser(e Engine, md5 string, userID int64) (*FileChunk, error) {
+func getFileChunkByMD5AndUser(e Engine, md5 string, userID int64, typeCloudBrain int) (*FileChunk, error) {
 	fileChunk := new(FileChunk)
 
-	if has, err := e.Where("md5 = ? and user_id = ?", md5, userID).Get(fileChunk); err != nil {
+	if has, err := e.Where("md5 = ? and user_id = ? and type = ?", md5, userID, typeCloudBrain).Get(fileChunk); err != nil {
 		return nil, err
 	} else if !has {
 		return nil, ErrFileChunkNotExist{md5, ""}
@@ -89,6 +95,6 @@ func UpdateFileChunk(fileChunk *FileChunk) error {
 func updateFileChunk(e Engine, fileChunk *FileChunk) error {
 	var sess *xorm.Session
 	sess = e.Where("uuid = ?", fileChunk.UUID)
-	_, err := sess.Cols("is_uploaded", "completed_parts").Update(fileChunk)
+	_, err := sess.Cols("is_uploaded").Update(fileChunk)
 	return err
 }
diff --git a/models/user.go b/models/user.go
index 4d282f477..2a150187e 100755
--- a/models/user.go
+++ b/models/user.go
@@ -2045,8 +2045,8 @@ func SyncExternalUsers(ctx context.Context, updateExisting bool) error {
 
 func GetBlockChainUnSuccessUsers() ([]*User, error) {
 	users := make([]*User, 0, 10)
-	err := x.Where("public_key is null").
-		Or("private_key is null").
+	err := x.Where("public_key = ''").
+		Or("private_key = ''").
 		Find(&users)
 	return users, err
 }
return fmt.Sprintf("%s\n", strings.Join(a, "\n")) +} + +// SignedHeaders +func SignedHeaders(r *http.Request) []string { + var a []string + for key := range r.Header { + a = append(a, strings.ToLower(key)) + } + sort.Strings(a) + return a +} + +// RequestPayload +func RequestPayload(r *http.Request) ([]byte, error) { + if r.Body == nil { + return []byte(""), nil + } + b, err := ioutil.ReadAll(r.Body) + if err != nil { + return []byte(""), err + } + r.Body = ioutil.NopCloser(bytes.NewBuffer(b)) + return b, err +} + +// Create a "String to Sign". +func StringToSign(canonicalRequest string, t time.Time) (string, error) { + hash := sha256.New() + _, err := hash.Write([]byte(canonicalRequest)) + if err != nil { + return "", err + } + return fmt.Sprintf("%s\n%s\n%x", + Algorithm, t.UTC().Format(BasicDateFormat), hash.Sum(nil)), nil +} + +// Create the HWS Signature. +func SignStringToSign(stringToSign string, signingKey []byte) (string, error) { + hm, err := hmacsha256(signingKey, stringToSign) + return fmt.Sprintf("%x", hm), err +} + +// HexEncodeSHA256Hash returns hexcode of sha256 +func HexEncodeSHA256Hash(body []byte) (string, error) { + hash := sha256.New() + if body == nil { + body = []byte("") + } + _, err := hash.Write(body) + return fmt.Sprintf("%x", hash.Sum(nil)), err +} + +// Get the finalized value for the "Authorization" header. The signature parameter is the output from SignStringToSign +func AuthHeaderValue(signature, accessKey string, signedHeaders []string) string { + return fmt.Sprintf("%s Access=%s, SignedHeaders=%s, Signature=%s", Algorithm, accessKey, strings.Join(signedHeaders, ";"), signature) +} + +// Signature HWS meta +type Signer struct { + Key string + Secret string +} + +// SignRequest set Authorization header +func (s *Signer) Sign(r *http.Request) error { + var t time.Time + var err error + var dt string + if dt = r.Header.Get(HeaderXDate); dt != "" { + t, err = time.Parse(BasicDateFormat, dt) + } + if err != nil || dt == "" { + t = time.Now() + r.Header.Set(HeaderXDate, t.UTC().Format(BasicDateFormat)) + } + signedHeaders := SignedHeaders(r) + canonicalRequest, err := CanonicalRequest(r, signedHeaders) + if err != nil { + return err + } + stringToSign, err := StringToSign(canonicalRequest, t) + if err != nil { + return err + } + signature, err := SignStringToSign(stringToSign, []byte(s.Secret)) + if err != nil { + return err + } + authValue := AuthHeaderValue(signature, s.Key, signedHeaders) + r.Header.Set(HeaderAuthorization, authValue) + return nil +} diff --git a/modules/auth/cloudbrain.go b/modules/auth/cloudbrain.go index 2b150cf75..2470c2ad6 100755 --- a/modules/auth/cloudbrain.go +++ b/modules/auth/cloudbrain.go @@ -5,7 +5,6 @@ import ( "gitea.com/macaron/macaron" ) -// CreateDatasetForm form for dataset page type CreateCloudBrainForm struct { JobName string `form:"job_name" binding:"Required"` Image string `form:"image" binding:"Required"` diff --git a/modules/auth/modelarts.go b/modules/auth/modelarts.go new file mode 100755 index 000000000..fb8280b9e --- /dev/null +++ b/modules/auth/modelarts.go @@ -0,0 +1,16 @@ +package auth + +import ( + "gitea.com/macaron/binding" + "gitea.com/macaron/macaron" +) + +type CreateModelArtsForm struct { + JobName string `form:"job_name" binding:"Required"` + Attachment string `form:"attachment" binding:"Required"` + Description string `form:"description"` +} + +func (f *CreateModelArtsForm) Validate(ctx *macaron.Context, errs binding.Errors) binding.Errors { + return validate(errs, ctx.Data, f, ctx.Locale) +} diff --git 
diff --git a/modules/APIGW-go-sdk-2.0.2/core/signer.go b/modules/APIGW-go-sdk-2.0.2/core/signer.go
new file mode 100755
index 000000000..7992713b3
--- /dev/null
+++ b/modules/APIGW-go-sdk-2.0.2/core/signer.go
@@ -0,0 +1,208 @@
+// HWS API Gateway Signature
+// based on https://github.com/datastream/aws/blob/master/signv4.go
+// Copyright (c) 2014, Xianjie
+
+package core
+
+import (
+	"bytes"
+	"crypto/hmac"
+	"crypto/sha256"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"sort"
+	"strings"
+	"time"
+)
+
+const (
+	BasicDateFormat     = "20060102T150405Z"
+	Algorithm           = "SDK-HMAC-SHA256"
+	HeaderXDate         = "X-Sdk-Date"
+	HeaderHost          = "host"
+	HeaderAuthorization = "Authorization"
+	HeaderContentSha256 = "X-Sdk-Content-Sha256"
+)
+
+func hmacsha256(key []byte, data string) ([]byte, error) {
+	h := hmac.New(sha256.New, []byte(key))
+	if _, err := h.Write([]byte(data)); err != nil {
+		return nil, err
+	}
+	return h.Sum(nil), nil
+}
+
+// Build a CanonicalRequest from a regular request string
+//
+// CanonicalRequest =
+//	HTTPRequestMethod + '\n' +
+//	CanonicalURI + '\n' +
+//	CanonicalQueryString + '\n' +
+//	CanonicalHeaders + '\n' +
+//	SignedHeaders + '\n' +
+//	HexEncode(Hash(RequestPayload))
+func CanonicalRequest(r *http.Request, signedHeaders []string) (string, error) {
+	var hexencode string
+	var err error
+	if hex := r.Header.Get(HeaderContentSha256); hex != "" {
+		hexencode = hex
+	} else {
+		data, err := RequestPayload(r)
+		if err != nil {
+			return "", err
+		}
+		hexencode, err = HexEncodeSHA256Hash(data)
+		if err != nil {
+			return "", err
+		}
+	}
+	return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", r.Method, CanonicalURI(r), CanonicalQueryString(r), CanonicalHeaders(r, signedHeaders), strings.Join(signedHeaders, ";"), hexencode), err
+}
+
+// CanonicalURI returns request uri
+func CanonicalURI(r *http.Request) string {
+	pattens := strings.Split(r.URL.Path, "/")
+	var uri []string
+	for _, v := range pattens {
+		uri = append(uri, escape(v))
+	}
+	urlpath := strings.Join(uri, "/")
+	if len(urlpath) == 0 || urlpath[len(urlpath)-1] != '/' {
+		urlpath = urlpath + "/"
+	}
+	return urlpath
+}
+
+// CanonicalQueryString
+func CanonicalQueryString(r *http.Request) string {
+	var keys []string
+	query := r.URL.Query()
+	for key := range query {
+		keys = append(keys, key)
+	}
+	sort.Strings(keys)
+	var a []string
+	for _, key := range keys {
+		k := escape(key)
+		sort.Strings(query[key])
+		for _, v := range query[key] {
+			kv := fmt.Sprintf("%s=%s", k, escape(v))
+			a = append(a, kv)
+		}
+	}
+	queryStr := strings.Join(a, "&")
+	r.URL.RawQuery = queryStr
+	return queryStr
+}
+
+// CanonicalHeaders
+func CanonicalHeaders(r *http.Request, signerHeaders []string) string {
+	var a []string
+	header := make(map[string][]string)
+	for k, v := range r.Header {
+		header[strings.ToLower(k)] = v
+	}
+	for _, key := range signerHeaders {
+		value := header[key]
+		if strings.EqualFold(key, HeaderHost) {
+			value = []string{r.Host}
+		}
+		sort.Strings(value)
+		for _, v := range value {
+			a = append(a, key+":"+strings.TrimSpace(v))
+		}
+	}
+	return fmt.Sprintf("%s\n", strings.Join(a, "\n"))
+}
+
+// SignedHeaders
+func SignedHeaders(r *http.Request) []string {
+	var a []string
+	for key := range r.Header {
+		a = append(a, strings.ToLower(key))
+	}
+	sort.Strings(a)
+	return a
+}
+
+// RequestPayload
+func RequestPayload(r *http.Request) ([]byte, error) {
+	if r.Body == nil {
+		return []byte(""), nil
+	}
+	b, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		return []byte(""), err
+	}
+	r.Body = ioutil.NopCloser(bytes.NewBuffer(b))
+	return b, err
+}
+
+// Create a "String to Sign".
+func StringToSign(canonicalRequest string, t time.Time) (string, error) {
+	hash := sha256.New()
+	_, err := hash.Write([]byte(canonicalRequest))
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("%s\n%s\n%x",
+		Algorithm, t.UTC().Format(BasicDateFormat), hash.Sum(nil)), nil
+}
+
+// Create the HWS Signature.
+func SignStringToSign(stringToSign string, signingKey []byte) (string, error) {
+	hm, err := hmacsha256(signingKey, stringToSign)
+	return fmt.Sprintf("%x", hm), err
+}
+
+// HexEncodeSHA256Hash returns hexcode of sha256
+func HexEncodeSHA256Hash(body []byte) (string, error) {
+	hash := sha256.New()
+	if body == nil {
+		body = []byte("")
+	}
+	_, err := hash.Write(body)
+	return fmt.Sprintf("%x", hash.Sum(nil)), err
+}
+
+// Get the finalized value for the "Authorization" header. The signature parameter is the output from SignStringToSign
+func AuthHeaderValue(signature, accessKey string, signedHeaders []string) string {
+	return fmt.Sprintf("%s Access=%s, SignedHeaders=%s, Signature=%s", Algorithm, accessKey, strings.Join(signedHeaders, ";"), signature)
+}
+
+// Signature HWS meta
+type Signer struct {
+	Key    string
+	Secret string
+}
+
+// SignRequest set Authorization header
+func (s *Signer) Sign(r *http.Request) error {
+	var t time.Time
+	var err error
+	var dt string
+	if dt = r.Header.Get(HeaderXDate); dt != "" {
+		t, err = time.Parse(BasicDateFormat, dt)
+	}
+	if err != nil || dt == "" {
+		t = time.Now()
+		r.Header.Set(HeaderXDate, t.UTC().Format(BasicDateFormat))
+	}
+	signedHeaders := SignedHeaders(r)
+	canonicalRequest, err := CanonicalRequest(r, signedHeaders)
+	if err != nil {
+		return err
+	}
+	stringToSign, err := StringToSign(canonicalRequest, t)
+	if err != nil {
+		return err
+	}
+	signature, err := SignStringToSign(stringToSign, []byte(s.Secret))
+	if err != nil {
+		return err
+	}
+	authValue := AuthHeaderValue(signature, s.Key, signedHeaders)
+	r.Header.Set(HeaderAuthorization, authValue)
+	return nil
+}
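`core.Signer` is the public entry point of this vendored SDK: it stamps `X-Sdk-Date`, builds the canonical request over all headers, and sets `Authorization`. A usage sketch; the import path follows the vendored directory layout and is an assumption:

```go
import (
	"net/http"

	core "code.gitea.io/gitea/modules/APIGW-go-sdk-2.0.2/core"
)

// signedRequest returns a GET request carrying the SDK-HMAC-SHA256 signature.
func signedRequest(key, secret, url string) (*http.Request, error) {
	r, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	s := core.Signer{Key: key, Secret: secret}
	if err := s.Sign(r); err != nil { // sets X-Sdk-Date and Authorization
		return nil, err
	}
	return r, nil
}
```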
diff --git a/modules/auth/cloudbrain.go b/modules/auth/cloudbrain.go
index 2b150cf75..2470c2ad6 100755
--- a/modules/auth/cloudbrain.go
+++ b/modules/auth/cloudbrain.go
@@ -5,7 +5,6 @@ import (
 	"gitea.com/macaron/macaron"
 )
 
-// CreateDatasetForm form for dataset page
 type CreateCloudBrainForm struct {
 	JobName string `form:"job_name" binding:"Required"`
 	Image   string `form:"image" binding:"Required"`
diff --git a/modules/auth/modelarts.go b/modules/auth/modelarts.go
new file mode 100755
index 000000000..fb8280b9e
--- /dev/null
+++ b/modules/auth/modelarts.go
@@ -0,0 +1,16 @@
+package auth
+
+import (
+	"gitea.com/macaron/binding"
+	"gitea.com/macaron/macaron"
+)
+
+type CreateModelArtsForm struct {
+	JobName     string `form:"job_name" binding:"Required"`
+	Attachment  string `form:"attachment" binding:"Required"`
+	Description string `form:"description"`
+}
+
+func (f *CreateModelArtsForm) Validate(ctx *macaron.Context, errs binding.Errors) binding.Errors {
+	return validate(errs, ctx.Data, f, ctx.Locale)
+}
diff --git a/modules/cloudbrain/cloudbrain.go b/modules/cloudbrain/cloudbrain.go
index dedfb55f6..50de926ca 100755
--- a/modules/cloudbrain/cloudbrain.go
+++ b/modules/cloudbrain/cloudbrain.go
@@ -98,6 +98,7 @@ func GenerateTask(ctx *context.Context, jobName, image, command, uuid, codePath,
 		JobName:     jobName,
 		SubTaskName: SubTaskName,
 		JobType:     jobType,
+		Type:        models.TypeCloudBrainOne,
 	})
 
 	if err != nil {
+ Post(HOST + "/v1/" + setting.ProjectID + urlNotebook + "/" + jobID + "/action") + + if err != nil { + return &result, fmt.Errorf("resty StopJob: %v", err) + } + + if res.StatusCode() == http.StatusUnauthorized && retry < 1 { + retry++ + _ = getToken() + goto sendjob + } + + var response models.NotebookResult + err = json.Unmarshal(res.Body(), &response) + if err != nil { + log.Error("json.Unmarshal failed: %s", err.Error()) + return &result, fmt.Errorf("son.Unmarshal failed: %s", err.Error()) + } + + if len(response.ErrorCode) != 0 { + log.Error("StopJob failed(%s): %s", response.ErrorCode, response.ErrorMsg) + return &result, fmt.Errorf("StopJob failed(%s): %s", response.ErrorCode, response.ErrorMsg) + } + + return &result, nil +} + +func DelJob(jobID string) (*models.NotebookDelResult, error) { + checkSetting() + client := getRestyClient() + var result models.NotebookDelResult + + retry := 0 + +sendjob: + res, err := client.R(). + SetHeader("Content-Type", "application/json"). + SetAuthToken(TOKEN). + SetResult(&result). + Delete(HOST + "/v1/" + setting.ProjectID + urlNotebook + "/" + jobID) + + if err != nil { + return &result, fmt.Errorf("resty DelJob: %v", err) + } + + if res.StatusCode() == http.StatusUnauthorized && retry < 1 { + retry++ + _ = getToken() + goto sendjob + } + + var response models.NotebookResult + err = json.Unmarshal(res.Body(), &response) + if err != nil { + log.Error("json.Unmarshal failed: %s", err.Error()) + return &result, fmt.Errorf("son.Unmarshal failed: %s", err.Error()) + } + + if len(response.ErrorCode) != 0 { + log.Error("DelJob failed(%s): %s", response.ErrorCode, response.ErrorMsg) + return &result, fmt.Errorf("DelJob failed(%s): %s", response.ErrorCode, response.ErrorMsg) + } + + return &result, nil +} + +func GetJobToken(jobID string) (*models.NotebookGetJobTokenResult, error) { + checkSetting() + client := getRestyClient() + var result models.NotebookGetJobTokenResult + + retry := 0 + +sendjob: + res, err := client.R(). + SetHeader("Content-Type", "application/json"). + SetAuthToken(TOKEN). + SetResult(&result). + Get(HOST + "/v1/" + setting.ProjectID + urlNotebook + "/" + jobID + "/token") + + if err != nil { + return &result, fmt.Errorf("resty GetJobToken: %v", err) + } + + if res.StatusCode() == http.StatusUnauthorized && retry < 1 { + retry++ + _ = getToken() + goto sendjob + } + + var response models.NotebookResult + err = json.Unmarshal(res.Body(), &response) + if err != nil { + log.Error("json.Unmarshal failed: %s", err.Error()) + return &result, fmt.Errorf("son.Unmarshal failed: %s", err.Error()) + } + + if len(response.ErrorCode) != 0 { + log.Error("GetJobToken failed(%s): %s", response.ErrorCode, response.ErrorMsg) + return &result, fmt.Errorf("GetJobToken failed(%s): %s", response.ErrorCode, response.ErrorMsg) + } + + return &result, nil +} diff --git a/modules/obs/auth.go b/modules/obs/auth.go new file mode 100755 index 000000000..607a5ec39 --- /dev/null +++ b/modules/obs/auth.go @@ -0,0 +1,466 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. 
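`GenerateTask` shards the OBS prefix by the first two characters of the attachment UUID, so the notebook mounts the same location the uploader wrote to (see `BASE_PATH` in the `[obs]` section above). A worked example with an illustrative UUID:

```go
uuid := "ab12cd34ef56" // illustrative attachment UUID
dataActualPath := setting.Bucket + "/" + setting.BasePath +
	path.Join(uuid[0:1], uuid[1:2]) + "/" + uuid + "/"
// With the sample app.ini values:
// "testopendata" + "/" + "attachment/" + "a/b" + "/" + "ab12cd34ef56" + "/"
//   => "testopendata/attachment/a/b/ab12cd34ef56/"
```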
diff --git a/modules/modelarts/resty.go b/modules/modelarts/resty.go
new file mode 100755
index 000000000..df020decb
--- /dev/null
+++ b/modules/modelarts/resty.go
@@ -0,0 +1,288 @@
+package modelarts
+
+import (
+	"crypto/tls"
+	"encoding/json"
+	"fmt"
+	"net/http"
+
+	"code.gitea.io/gitea/models"
+	"code.gitea.io/gitea/modules/log"
+	"code.gitea.io/gitea/modules/setting"
+	"github.com/go-resty/resty/v2"
+)
+
+var (
+	restyClient *resty.Client
+	HOST        string
+	TOKEN       string
+)
+
+const (
+	methodPassword = "password"
+
+	urlGetToken = "/v3/auth/tokens"
+	urlNotebook = "/demanager/instances"
+
+	errorCodeExceedLimit = "ModelArts.0118"
+)
+
+func getRestyClient() *resty.Client {
+	if restyClient == nil {
+		restyClient = resty.New()
+		restyClient.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: true})
+	}
+	return restyClient
+}
+
+func checkSetting() {
+	if len(HOST) != 0 && len(TOKEN) != 0 && restyClient != nil {
+		return
+	}
+
+	err := getToken()
+	if err != nil {
+		log.Error("getToken failed:%v", err)
+	}
+}
+
+func getToken() error {
+	HOST = setting.ModelArtsHost
+
+	client := getRestyClient()
+	params := models.GetTokenParams{
+		Auth: models.Auth{
+			Identity: models.Identity{
+				Methods: []string{methodPassword},
+				Password: models.Password{
+					User: models.NotebookUser{
+						Name:     setting.ModelArtsUsername,
+						Password: setting.ModelArtsPassword,
+						Domain: models.Domain{
+							Name: setting.ModelArtsDomain,
+						},
+					},
+				},
+			},
+			Scope: models.Scope{
+				Project: models.Project{
+					Name: setting.ProjectName,
+				},
+			},
+		},
+	}
+
+	res, err := client.R().
+		SetHeader("Content-Type", "application/json").
+		SetBody(params).
+		Post(setting.IamHost + urlGetToken)
+	if err != nil {
+		return fmt.Errorf("resty getToken: %v", err)
+	}
+
+	if res.StatusCode() != http.StatusCreated {
+		return fmt.Errorf("getToken failed:%s", res.String())
+	}
+
+	TOKEN = res.Header().Get("X-Subject-Token")
+
+	return nil
+}
+
+func CreateJob(createJobParams models.CreateNotebookParams) (*models.CreateNotebookResult, error) {
+	checkSetting()
+	client := getRestyClient()
+	var result models.CreateNotebookResult
+
+	retry := 0
+
+sendjob:
+	res, err := client.R().
+		SetHeader("Content-Type", "application/json").
+		SetAuthToken(TOKEN).
+		SetBody(createJobParams).
+		SetResult(&result).
+		Post(HOST + "/v1/" + setting.ProjectID + urlNotebook)
+	if err != nil {
+		return nil, fmt.Errorf("resty create job: %s", err)
+	}
+
+	if res.StatusCode() == http.StatusUnauthorized && retry < 1 {
+		retry++
+		_ = getToken()
+		goto sendjob
+	}
+
+	var response models.NotebookResult
+	err = json.Unmarshal(res.Body(), &response)
+	if err != nil {
+		log.Error("json.Unmarshal failed: %s", err.Error())
+		return &result, fmt.Errorf("json.Unmarshal failed: %s", err.Error())
+	}
+
+	if len(response.ErrorCode) != 0 {
+		log.Error("CreateJob failed(%s): %s", response.ErrorCode, response.ErrorMsg)
+		if response.ErrorCode == errorCodeExceedLimit {
+			response.ErrorMsg = "所选规格使用数量已超过最大配额限制。" // the selected flavor has exceeded its maximum quota
+		}
+		return &result, fmt.Errorf("CreateJob failed(%s): %s", response.ErrorCode, response.ErrorMsg)
+	}
+
+	return &result, nil
+}
+
+func GetJob(jobID string) (*models.GetNotebookResult, error) {
+	checkSetting()
+	client := getRestyClient()
+	var result models.GetNotebookResult
+
+	retry := 0
+
+sendjob:
+	res, err := client.R().
+		SetHeader("Content-Type", "application/json").
+		SetAuthToken(TOKEN).
+		SetResult(&result).
+		Get(HOST + "/v1/" + setting.ProjectID + urlNotebook + "/" + jobID)
+	if err != nil {
+		return nil, fmt.Errorf("resty GetJob: %v", err)
+	}
+
+	if res.StatusCode() == http.StatusUnauthorized && retry < 1 {
+		retry++
+		_ = getToken()
+		goto sendjob
+	}
+
+	var response models.NotebookResult
+	err = json.Unmarshal(res.Body(), &response)
+	if err != nil {
+		log.Error("json.Unmarshal failed: %s", err.Error())
+		return &result, fmt.Errorf("json.Unmarshal failed: %s", err.Error())
+	}
+
+	if len(response.ErrorCode) != 0 {
+		log.Error("GetJob failed(%s): %s", response.ErrorCode, response.ErrorMsg)
+		return &result, fmt.Errorf("GetJob failed(%s): %s", response.ErrorCode, response.ErrorMsg)
+	}
+
+	return &result, nil
+}
+
+func StopJob(jobID string, param models.NotebookAction) (*models.NotebookActionResult, error) {
+	checkSetting()
+	client := getRestyClient()
+	var result models.NotebookActionResult
+
+	retry := 0
+
+sendjob:
+	res, err := client.R().
+		SetHeader("Content-Type", "application/json").
+		SetBody(param).
+		SetAuthToken(TOKEN).
+		SetResult(&result).
+		Post(HOST + "/v1/" + setting.ProjectID + urlNotebook + "/" + jobID + "/action")
+	if err != nil {
+		return &result, fmt.Errorf("resty StopJob: %v", err)
+	}
+
+	if res.StatusCode() == http.StatusUnauthorized && retry < 1 {
+		retry++
+		_ = getToken()
+		goto sendjob
+	}
+
+	var response models.NotebookResult
+	err = json.Unmarshal(res.Body(), &response)
+	if err != nil {
+		log.Error("json.Unmarshal failed: %s", err.Error())
+		return &result, fmt.Errorf("json.Unmarshal failed: %s", err.Error())
+	}
+
+	if len(response.ErrorCode) != 0 {
+		log.Error("StopJob failed(%s): %s", response.ErrorCode, response.ErrorMsg)
+		return &result, fmt.Errorf("StopJob failed(%s): %s", response.ErrorCode, response.ErrorMsg)
+	}
+
+	return &result, nil
+}
+
+func DelJob(jobID string) (*models.NotebookDelResult, error) {
+	checkSetting()
+	client := getRestyClient()
+	var result models.NotebookDelResult
+
+	retry := 0
+
+sendjob:
+	res, err := client.R().
+		SetHeader("Content-Type", "application/json").
+		SetAuthToken(TOKEN).
+		SetResult(&result).
+		Delete(HOST + "/v1/" + setting.ProjectID + urlNotebook + "/" + jobID)
+	if err != nil {
+		return &result, fmt.Errorf("resty DelJob: %v", err)
+	}
+
+	if res.StatusCode() == http.StatusUnauthorized && retry < 1 {
+		retry++
+		_ = getToken()
+		goto sendjob
+	}
+
+	var response models.NotebookResult
+	err = json.Unmarshal(res.Body(), &response)
+	if err != nil {
+		log.Error("json.Unmarshal failed: %s", err.Error())
+		return &result, fmt.Errorf("json.Unmarshal failed: %s", err.Error())
+	}
+
+	if len(response.ErrorCode) != 0 {
+		log.Error("DelJob failed(%s): %s", response.ErrorCode, response.ErrorMsg)
+		return &result, fmt.Errorf("DelJob failed(%s): %s", response.ErrorCode, response.ErrorMsg)
+	}
+
+	return &result, nil
+}
+
+func GetJobToken(jobID string) (*models.NotebookGetJobTokenResult, error) {
+	checkSetting()
+	client := getRestyClient()
+	var result models.NotebookGetJobTokenResult
+
+	retry := 0
+
+sendjob:
+	res, err := client.R().
+		SetHeader("Content-Type", "application/json").
+		SetAuthToken(TOKEN).
+		SetResult(&result).
+		Get(HOST + "/v1/" + setting.ProjectID + urlNotebook + "/" + jobID + "/token")
+	if err != nil {
+		return &result, fmt.Errorf("resty GetJobToken: %v", err)
+	}
+
+	if res.StatusCode() == http.StatusUnauthorized && retry < 1 {
+		retry++
+		_ = getToken()
+		goto sendjob
+	}
+
+	var response models.NotebookResult
+	err = json.Unmarshal(res.Body(), &response)
+	if err != nil {
+		log.Error("json.Unmarshal failed: %s", err.Error())
+		return &result, fmt.Errorf("json.Unmarshal failed: %s", err.Error())
+	}
+
+	if len(response.ErrorCode) != 0 {
+		log.Error("GetJobToken failed(%s): %s", response.ErrorCode, response.ErrorMsg)
+		return &result, fmt.Errorf("GetJobToken failed(%s): %s", response.ErrorCode, response.ErrorMsg)
+	}
+
+	return &result, nil
+}
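Every wrapper above shares the same shape: lazily fetch an IAM token via `checkSetting`, issue the call, and retry exactly once on 401 after re-fetching the token (the `goto sendjob` pattern). Stopping a notebook, for instance, goes through `StopJob` with one of the action constants from models/cloudbrain.go; a sketch, where `task` is a hypothetical `*models.Cloudbrain` and error handling is abbreviated:

```go
res, err := modelarts.StopJob(task.JobID, models.NotebookAction{
	Action: models.ActionStop,
})
if err != nil {
	log.Error("StopJob(%s) failed: %v", task.JobID, err)
	return err
}
// res.CurrentStatus / res.PreviousState report the transition.
```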
requestURL += "?" + } else { + requestURL += "&" + } + delete(headers, HEADER_DATE_CAMEL) + + if obsClient.conf.signature != SignatureObs { + requestURL += "AWS" + } + requestURL += fmt.Sprintf("AccessKeyId=%s&Expires=%d&Signature=%s", UrlEncode(obsClient.conf.securityProvider.ak, false), expires, signature) + } + } + + return +} + +func (obsClient ObsClient) doAuth(method, bucketName, objectKey string, params map[string]string, + headers map[string][]string, hostName string) (requestURL string, err error) { + isAkSkEmpty := obsClient.conf.securityProvider == nil || obsClient.conf.securityProvider.ak == "" || obsClient.conf.securityProvider.sk == "" + if isAkSkEmpty == false && obsClient.conf.securityProvider.securityToken != "" { + if obsClient.conf.signature == SignatureObs { + headers[HEADER_STS_TOKEN_OBS] = []string{obsClient.conf.securityProvider.securityToken} + } else { + headers[HEADER_STS_TOKEN_AMZ] = []string{obsClient.conf.securityProvider.securityToken} + } + } + isObs := obsClient.conf.signature == SignatureObs + requestURL, canonicalizedURL := obsClient.conf.formatUrls(bucketName, objectKey, params, true) + parsedRequestURL, err := url.Parse(requestURL) + if err != nil { + return "", err + } + encodeHeaders(headers) + + if hostName == "" { + hostName = parsedRequestURL.Host + } + + isV4 := obsClient.conf.signature == SignatureV4 + prepareHostAndDate(headers, hostName, isV4) + + if isAkSkEmpty { + doLog(LEVEL_WARN, "No ak/sk provided, skip to construct authorization") + } else { + ak := obsClient.conf.securityProvider.ak + sk := obsClient.conf.securityProvider.sk + var authorization string + if isV4 { + headers[HEADER_CONTENT_SHA256_AMZ] = []string{UNSIGNED_PAYLOAD} + ret := v4Auth(ak, sk, obsClient.conf.region, method, canonicalizedURL, parsedRequestURL.RawQuery, headers) + authorization = fmt.Sprintf("%s Credential=%s,SignedHeaders=%s,Signature=%s", V4_HASH_PREFIX, ret["Credential"], ret["SignedHeaders"], ret["Signature"]) + } else { + ret := v2Auth(ak, sk, method, canonicalizedURL, headers, isObs) + hashPrefix := V2_HASH_PREFIX + if isObs { + hashPrefix = OBS_HASH_PREFIX + } + authorization = fmt.Sprintf("%s %s:%s", hashPrefix, ak, ret["Signature"]) + } + headers[HEADER_AUTH_CAMEL] = []string{authorization} + } + return +} + +func prepareHostAndDate(headers map[string][]string, hostName string, isV4 bool) { + headers[HEADER_HOST_CAMEL] = []string{hostName} + if date, ok := headers[HEADER_DATE_AMZ]; ok { + flag := false + if len(date) == 1 { + if isV4 { + if t, err := time.Parse(LONG_DATE_FORMAT, date[0]); err == nil { + headers[HEADER_DATE_CAMEL] = []string{FormatUtcToRfc1123(t)} + flag = true + } + } else { + if strings.HasSuffix(date[0], "GMT") { + headers[HEADER_DATE_CAMEL] = []string{date[0]} + flag = true + } + } + } + if !flag { + delete(headers, HEADER_DATE_AMZ) + } + } + if _, ok := headers[HEADER_DATE_CAMEL]; !ok { + headers[HEADER_DATE_CAMEL] = []string{FormatUtcToRfc1123(time.Now().UTC())} + } +} + +func encodeHeaders(headers map[string][]string) { + for key, values := range headers { + for index, value := range values { + values[index] = UrlEncode(value, true) + } + headers[key] = values + } +} + +func attachHeaders(headers map[string][]string, isObs bool) string { + length := len(headers) + _headers := make(map[string][]string, length) + keys := make([]string, 0, length) + + for key, value := range headers { + _key := strings.ToLower(strings.TrimSpace(key)) + if _key != "" { + prefixheader := HEADER_PREFIX + if isObs { + prefixheader = HEADER_PREFIX_OBS + } + 
if _key == "content-md5" || _key == "content-type" || _key == "date" || strings.HasPrefix(_key, prefixheader) { + keys = append(keys, _key) + _headers[_key] = value + } + } else { + delete(headers, key) + } + } + + for _, interestedHeader := range interestedHeaders { + if _, ok := _headers[interestedHeader]; !ok { + _headers[interestedHeader] = []string{""} + keys = append(keys, interestedHeader) + } + } + dateCamelHeader := PARAM_DATE_AMZ_CAMEL + dataHeader := HEADER_DATE_AMZ + if isObs { + dateCamelHeader = PARAM_DATE_OBS_CAMEL + dataHeader = HEADER_DATE_OBS + } + if _, ok := _headers[HEADER_DATE_CAMEL]; ok { + if _, ok := _headers[dataHeader]; ok { + _headers[HEADER_DATE_CAMEL] = []string{""} + } else if _, ok := headers[dateCamelHeader]; ok { + _headers[HEADER_DATE_CAMEL] = []string{""} + } + } else if _, ok := _headers[strings.ToLower(HEADER_DATE_CAMEL)]; ok { + if _, ok := _headers[dataHeader]; ok { + _headers[HEADER_DATE_CAMEL] = []string{""} + } else if _, ok := headers[dateCamelHeader]; ok { + _headers[HEADER_DATE_CAMEL] = []string{""} + } + } + + sort.Strings(keys) + + stringToSign := make([]string, 0, len(keys)) + for _, key := range keys { + var value string + prefixHeader := HEADER_PREFIX + prefixMetaHeader := HEADER_PREFIX_META + if isObs { + prefixHeader = HEADER_PREFIX_OBS + prefixMetaHeader = HEADER_PREFIX_META_OBS + } + if strings.HasPrefix(key, prefixHeader) { + if strings.HasPrefix(key, prefixMetaHeader) { + for index, v := range _headers[key] { + value += strings.TrimSpace(v) + if index != len(_headers[key])-1 { + value += "," + } + } + } else { + value = strings.Join(_headers[key], ",") + } + value = fmt.Sprintf("%s:%s", key, value) + } else { + value = strings.Join(_headers[key], ",") + } + stringToSign = append(stringToSign, value) + } + return strings.Join(stringToSign, "\n") +} + +func getV2StringToSign(method, canonicalizedURL string, headers map[string][]string, isObs bool) string { + stringToSign := strings.Join([]string{method, "\n", attachHeaders(headers, isObs), "\n", canonicalizedURL}, "") + + var isSecurityToken bool + var securityToken []string + if isObs { + securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_OBS] + } else { + securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_AMZ] + } + var query []string + if !isSecurityToken { + parmas := strings.Split(canonicalizedURL, "?") + if len(parmas) > 1 { + query = strings.Split(parmas[1], "&") + for _, value := range query { + if strings.HasPrefix(value, HEADER_STS_TOKEN_AMZ+"=") || strings.HasPrefix(value, HEADER_STS_TOKEN_OBS+"=") { + if value[len(HEADER_STS_TOKEN_AMZ)+1:] != "" { + securityToken = []string{value[len(HEADER_STS_TOKEN_AMZ)+1:]} + isSecurityToken = true + } + } + } + } + } + logStringToSign := stringToSign + if isSecurityToken && len(securityToken) > 0 { + logStringToSign = strings.Replace(logStringToSign, securityToken[0], "******", -1) + } + doLog(LEVEL_DEBUG, "The v2 auth stringToSign:\n%s", logStringToSign) + return stringToSign +} + +func v2Auth(ak, sk, method, canonicalizedURL string, headers map[string][]string, isObs bool) map[string]string { + stringToSign := getV2StringToSign(method, canonicalizedURL, headers, isObs) + return map[string]string{"Signature": Base64Encode(HmacSha1([]byte(sk), []byte(stringToSign)))} +} + +func getScope(region, shortDate string) string { + return fmt.Sprintf("%s/%s/%s/%s", shortDate, region, V4_SERVICE_NAME, V4_SERVICE_SUFFIX) +} + +func getCredential(ak, region, shortDate string) (string, string) { + scope := getScope(region, shortDate) + 
return fmt.Sprintf("%s/%s", ak, scope), scope +} + +func getV4StringToSign(method, canonicalizedURL, queryURL, scope, longDate, payload string, signedHeaders []string, headers map[string][]string) string { + canonicalRequest := make([]string, 0, 10+len(signedHeaders)*4) + canonicalRequest = append(canonicalRequest, method) + canonicalRequest = append(canonicalRequest, "\n") + canonicalRequest = append(canonicalRequest, canonicalizedURL) + canonicalRequest = append(canonicalRequest, "\n") + canonicalRequest = append(canonicalRequest, queryURL) + canonicalRequest = append(canonicalRequest, "\n") + + for _, signedHeader := range signedHeaders { + values, _ := headers[signedHeader] + for _, value := range values { + canonicalRequest = append(canonicalRequest, signedHeader) + canonicalRequest = append(canonicalRequest, ":") + canonicalRequest = append(canonicalRequest, value) + canonicalRequest = append(canonicalRequest, "\n") + } + } + canonicalRequest = append(canonicalRequest, "\n") + canonicalRequest = append(canonicalRequest, strings.Join(signedHeaders, ";")) + canonicalRequest = append(canonicalRequest, "\n") + canonicalRequest = append(canonicalRequest, payload) + + _canonicalRequest := strings.Join(canonicalRequest, "") + + var isSecurityToken bool + var securityToken []string + if securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_OBS]; !isSecurityToken { + securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_AMZ] + } + var query []string + if !isSecurityToken { + query = strings.Split(queryURL, "&") + for _, value := range query { + if strings.HasPrefix(value, HEADER_STS_TOKEN_AMZ+"=") || strings.HasPrefix(value, HEADER_STS_TOKEN_OBS+"=") { + if value[len(HEADER_STS_TOKEN_AMZ)+1:] != "" { + securityToken = []string{value[len(HEADER_STS_TOKEN_AMZ)+1:]} + isSecurityToken = true + } + } + } + } + logCanonicalRequest := _canonicalRequest + if isSecurityToken && len(securityToken) > 0 { + logCanonicalRequest = strings.Replace(logCanonicalRequest, securityToken[0], "******", -1) + } + doLog(LEVEL_DEBUG, "The v4 auth canonicalRequest:\n%s", logCanonicalRequest) + + stringToSign := make([]string, 0, 7) + stringToSign = append(stringToSign, V4_HASH_PREFIX) + stringToSign = append(stringToSign, "\n") + stringToSign = append(stringToSign, longDate) + stringToSign = append(stringToSign, "\n") + stringToSign = append(stringToSign, scope) + stringToSign = append(stringToSign, "\n") + stringToSign = append(stringToSign, HexSha256([]byte(_canonicalRequest))) + + _stringToSign := strings.Join(stringToSign, "") + + doLog(LEVEL_DEBUG, "The v4 auth stringToSign:\n%s", _stringToSign) + return _stringToSign +} + +func getSignedHeaders(headers map[string][]string) ([]string, map[string][]string) { + length := len(headers) + _headers := make(map[string][]string, length) + signedHeaders := make([]string, 0, length) + for key, value := range headers { + _key := strings.ToLower(strings.TrimSpace(key)) + if _key != "" { + signedHeaders = append(signedHeaders, _key) + _headers[_key] = value + } else { + delete(headers, key) + } + } + sort.Strings(signedHeaders) + return signedHeaders, _headers +} + +func getSignature(stringToSign, sk, region, shortDate string) string { + key := HmacSha256([]byte(V4_HASH_PRE+sk), []byte(shortDate)) + key = HmacSha256(key, []byte(region)) + key = HmacSha256(key, []byte(V4_SERVICE_NAME)) + key = HmacSha256(key, []byte(V4_SERVICE_SUFFIX)) + return Hex(HmacSha256(key, []byte(stringToSign))) +} + +// V4Auth is a wrapper for v4Auth +func V4Auth(ak, sk, region, method, 
+
+func getV4StringToSign(method, canonicalizedURL, queryURL, scope, longDate, payload string, signedHeaders []string, headers map[string][]string) string {
+	canonicalRequest := make([]string, 0, 10+len(signedHeaders)*4)
+	canonicalRequest = append(canonicalRequest, method)
+	canonicalRequest = append(canonicalRequest, "\n")
+	canonicalRequest = append(canonicalRequest, canonicalizedURL)
+	canonicalRequest = append(canonicalRequest, "\n")
+	canonicalRequest = append(canonicalRequest, queryURL)
+	canonicalRequest = append(canonicalRequest, "\n")
+
+	for _, signedHeader := range signedHeaders {
+		values, _ := headers[signedHeader]
+		for _, value := range values {
+			canonicalRequest = append(canonicalRequest, signedHeader)
+			canonicalRequest = append(canonicalRequest, ":")
+			canonicalRequest = append(canonicalRequest, value)
+			canonicalRequest = append(canonicalRequest, "\n")
+		}
+	}
+	canonicalRequest = append(canonicalRequest, "\n")
+	canonicalRequest = append(canonicalRequest, strings.Join(signedHeaders, ";"))
+	canonicalRequest = append(canonicalRequest, "\n")
+	canonicalRequest = append(canonicalRequest, payload)
+
+	_canonicalRequest := strings.Join(canonicalRequest, "")
+
+	var isSecurityToken bool
+	var securityToken []string
+	if securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_OBS]; !isSecurityToken {
+		securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_AMZ]
+	}
+	var query []string
+	if !isSecurityToken {
+		query = strings.Split(queryURL, "&")
+		for _, value := range query {
+			if strings.HasPrefix(value, HEADER_STS_TOKEN_AMZ+"=") || strings.HasPrefix(value, HEADER_STS_TOKEN_OBS+"=") {
+				if value[len(HEADER_STS_TOKEN_AMZ)+1:] != "" {
+					securityToken = []string{value[len(HEADER_STS_TOKEN_AMZ)+1:]}
+					isSecurityToken = true
+				}
+			}
+		}
+	}
+	logCanonicalRequest := _canonicalRequest
+	if isSecurityToken && len(securityToken) > 0 {
+		logCanonicalRequest = strings.Replace(logCanonicalRequest, securityToken[0], "******", -1)
+	}
+	doLog(LEVEL_DEBUG, "The v4 auth canonicalRequest:\n%s", logCanonicalRequest)
+
+	stringToSign := make([]string, 0, 7)
+	stringToSign = append(stringToSign, V4_HASH_PREFIX)
+	stringToSign = append(stringToSign, "\n")
+	stringToSign = append(stringToSign, longDate)
+	stringToSign = append(stringToSign, "\n")
+	stringToSign = append(stringToSign, scope)
+	stringToSign = append(stringToSign, "\n")
+	stringToSign = append(stringToSign, HexSha256([]byte(_canonicalRequest)))
+
+	_stringToSign := strings.Join(stringToSign, "")
+
+	doLog(LEVEL_DEBUG, "The v4 auth stringToSign:\n%s", _stringToSign)
+	return _stringToSign
+}
+
+func getSignedHeaders(headers map[string][]string) ([]string, map[string][]string) {
+	length := len(headers)
+	_headers := make(map[string][]string, length)
+	signedHeaders := make([]string, 0, length)
+	for key, value := range headers {
+		_key := strings.ToLower(strings.TrimSpace(key))
+		if _key != "" {
+			signedHeaders = append(signedHeaders, _key)
+			_headers[_key] = value
+		} else {
+			delete(headers, key)
+		}
+	}
+	sort.Strings(signedHeaders)
+	return signedHeaders, _headers
+}
+
+func getSignature(stringToSign, sk, region, shortDate string) string {
+	key := HmacSha256([]byte(V4_HASH_PRE+sk), []byte(shortDate))
+	key = HmacSha256(key, []byte(region))
+	key = HmacSha256(key, []byte(V4_SERVICE_NAME))
+	key = HmacSha256(key, []byte(V4_SERVICE_SUFFIX))
+	return Hex(HmacSha256(key, []byte(stringToSign)))
+}
+
+// V4Auth is a wrapper for v4Auth
+func V4Auth(ak, sk, region, method, canonicalizedURL, queryURL string, headers map[string][]string) map[string]string {
+	return v4Auth(ak, sk, region, method, canonicalizedURL, queryURL, headers)
+}
+
+func v4Auth(ak, sk, region, method, canonicalizedURL, queryURL string, headers map[string][]string) map[string]string {
+	var t time.Time
+	if val, ok := headers[HEADER_DATE_AMZ]; ok {
+		var err error
+		t, err = time.Parse(LONG_DATE_FORMAT, val[0])
+		if err != nil {
+			t = time.Now().UTC()
+		}
+	} else if val, ok := headers[PARAM_DATE_AMZ_CAMEL]; ok {
+		var err error
+		t, err = time.Parse(LONG_DATE_FORMAT, val[0])
+		if err != nil {
+			t = time.Now().UTC()
+		}
+	} else if val, ok := headers[HEADER_DATE_CAMEL]; ok {
+		var err error
+		t, err = time.Parse(RFC1123_FORMAT, val[0])
+		if err != nil {
+			t = time.Now().UTC()
+		}
+	} else if val, ok := headers[strings.ToLower(HEADER_DATE_CAMEL)]; ok {
+		var err error
+		t, err = time.Parse(RFC1123_FORMAT, val[0])
+		if err != nil {
+			t = time.Now().UTC()
+		}
+	} else {
+		t = time.Now().UTC()
+	}
+	shortDate := t.Format(SHORT_DATE_FORMAT)
+	longDate := t.Format(LONG_DATE_FORMAT)
+
+	signedHeaders, _headers := getSignedHeaders(headers)
+
+	credential, scope := getCredential(ak, region, shortDate)
+
+	payload := UNSIGNED_PAYLOAD
+	if val, ok := headers[HEADER_CONTENT_SHA256_AMZ]; ok {
+		payload = val[0]
+	}
+	stringToSign := getV4StringToSign(method, canonicalizedURL, queryURL, scope, longDate, payload, signedHeaders, _headers)
+
+	signature := getSignature(stringToSign, sk, region, shortDate)
+
+	ret := make(map[string]string, 3)
+	ret["Credential"] = credential
+	ret["SignedHeaders"] = strings.Join(signedHeaders, ";")
+	ret["Signature"] = signature
+	return ret
+}
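`V4Auth` exposes the three components that `doAuth` stitches into the Authorization header. A sketch of calling it directly; the literal header names, the `AWS4-HMAC-SHA256` prefix, and the scope shape are my reading of the SDK constants, which live in a const file not shown in this diff:

```go
headers := map[string][]string{
	"host":       {"testopendata.obs.cn-south-222.ai.pcl.cn"},
	"x-amz-date": {"20210115T000000Z"},
}
ret := V4Auth(ak, sk, "cn-south-222", "GET", "/", "", headers)
authorization := fmt.Sprintf("AWS4-HMAC-SHA256 Credential=%s,SignedHeaders=%s,Signature=%s",
	ret["Credential"], ret["SignedHeaders"], ret["Signature"])
// Credential is "<ak>/" + scope, e.g. "<ak>/20210115/cn-south-222/s3/aws4_request".
_ = authorization
```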
diff --git a/modules/obs/client.go b/modules/obs/client.go
new file mode 100755
index 000000000..731f9f465
--- /dev/null
+++ b/modules/obs/client.go
@@ -0,0 +1,1307 @@
+// Copyright 2019 Huawei Technologies Co.,Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+//nolint:golint, unused
+package obs
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+	"sort"
+	"strings"
+)
+
+// ObsClient defines OBS client.
+type ObsClient struct {
+	conf       *config
+	httpClient *http.Client
+}
+
+// New creates a new ObsClient instance.
+func New(ak, sk, endpoint string, configurers ...configurer) (*ObsClient, error) {
+	conf := &config{securityProvider: &securityProvider{ak: ak, sk: sk}, endpoint: endpoint}
+	conf.maxRetryCount = -1
+	conf.maxRedirectCount = -1
+	for _, configurer := range configurers {
+		configurer(conf)
+	}
+
+	if err := conf.initConfigWithDefault(); err != nil {
+		return nil, err
+	}
+	err := conf.getTransport()
+	if err != nil {
+		return nil, err
+	}
+
+	if isWarnLogEnabled() {
+		info := make([]string, 3)
+		info[0] = fmt.Sprintf("[OBS SDK Version=%s", obsSdkVersion)
+		info[1] = fmt.Sprintf("Endpoint=%s", conf.endpoint)
+		accessMode := "Virtual Hosting"
+		if conf.pathStyle {
+			accessMode = "Path"
+		}
+		info[2] = fmt.Sprintf("Access Mode=%s]", accessMode)
+		doLog(LEVEL_WARN, strings.Join(info, "];["))
+	}
+	doLog(LEVEL_DEBUG, "Create obsclient with config:\n%s\n", conf)
+	obsClient := &ObsClient{conf: conf, httpClient: &http.Client{Transport: conf.transport, CheckRedirect: checkRedirectFunc}}
+	return obsClient, nil
+}
+
+// Refresh refreshes ak, sk and securityToken for obsClient.
+func (obsClient ObsClient) Refresh(ak, sk, securityToken string) {
+	sp := &securityProvider{ak: strings.TrimSpace(ak), sk: strings.TrimSpace(sk), securityToken: strings.TrimSpace(securityToken)}
+	obsClient.conf.securityProvider = sp
+}
+
+// Close closes ObsClient.
+func (obsClient ObsClient) Close() {
+	obsClient.httpClient = nil
+	obsClient.conf.transport.CloseIdleConnections()
+	obsClient.conf = nil
+}
+
+// ListBuckets lists buckets.
+//
+// You can use this API to obtain the bucket list. In the list, bucket names are displayed in lexicographical order.
+func (obsClient ObsClient) ListBuckets(input *ListBucketsInput, extensions ...extensionOptions) (output *ListBucketsOutput, err error) {
+	if input == nil {
+		input = &ListBucketsInput{}
+	}
+	output = &ListBucketsOutput{}
+	err = obsClient.doActionWithoutBucket("ListBuckets", HTTP_GET, input, output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// CreateBucket creates a bucket.
+//
+// You can use this API to create a bucket and name it as you specify. The created bucket name must be unique in OBS.
+func (obsClient ObsClient) CreateBucket(input *CreateBucketInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("CreateBucketInput is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("CreateBucket", HTTP_PUT, input.Bucket, input, output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// DeleteBucket deletes a bucket.
+//
+// You can use this API to delete a bucket. The bucket to be deleted must be empty
+// (containing no objects, noncurrent object versions, or part fragments).
+func (obsClient ObsClient) DeleteBucket(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("DeleteBucket", HTTP_DELETE, bucketName, defaultSerializable, output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// SetBucketStoragePolicy sets bucket storage class.
+//
+// You can use this API to set storage class for bucket.
+func (obsClient ObsClient) SetBucketStoragePolicy(input *SetBucketStoragePolicyInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("SetBucketStoragePolicyInput is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("SetBucketStoragePolicy", HTTP_PUT, input.Bucket, input, output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+func (obsClient ObsClient) getBucketStoragePolicyS3(bucketName string, extensions []extensionOptions) (output *GetBucketStoragePolicyOutput, err error) {
+	output = &GetBucketStoragePolicyOutput{}
+	var outputS3 *getBucketStoragePolicyOutputS3
+	outputS3 = &getBucketStoragePolicyOutputS3{}
+	err = obsClient.doActionWithBucket("GetBucketStoragePolicy", HTTP_GET, bucketName, newSubResourceSerial(SubResourceStoragePolicy), outputS3, extensions)
+	if err != nil {
+		output = nil
+		return
+	}
+	output.BaseModel = outputS3.BaseModel
+	output.StorageClass = fmt.Sprintf("%s", outputS3.StorageClass)
+	return
+}
+
+func (obsClient ObsClient) getBucketStoragePolicyObs(bucketName string, extensions []extensionOptions) (output *GetBucketStoragePolicyOutput, err error) {
+	output = &GetBucketStoragePolicyOutput{}
+	var outputObs *getBucketStoragePolicyOutputObs
+	outputObs = &getBucketStoragePolicyOutputObs{}
+	err = obsClient.doActionWithBucket("GetBucketStoragePolicy", HTTP_GET, bucketName, newSubResourceSerial(SubResourceStorageClass), outputObs, extensions)
+	if err != nil {
+		output = nil
+		return
+	}
+	output.BaseModel = outputObs.BaseModel
+	output.StorageClass = outputObs.StorageClass
+	return
+}
+
+// GetBucketStoragePolicy gets bucket storage class.
+//
+// You can use this API to obtain the storage class of a bucket.
+func (obsClient ObsClient) GetBucketStoragePolicy(bucketName string, extensions ...extensionOptions) (output *GetBucketStoragePolicyOutput, err error) {
+	if obsClient.conf.signature == SignatureObs {
+		return obsClient.getBucketStoragePolicyObs(bucketName, extensions)
+	}
+	return obsClient.getBucketStoragePolicyS3(bucketName, extensions)
+}
+
+// ListObjects lists objects in a bucket.
+//
+// You can use this API to list objects in a bucket. By default, a maximum of 1000 objects are listed.
+func (obsClient ObsClient) ListObjects(input *ListObjectsInput, extensions ...extensionOptions) (output *ListObjectsOutput, err error) {
+	if input == nil {
+		return nil, errors.New("ListObjectsInput is nil")
+	}
+	output = &ListObjectsOutput{}
+	err = obsClient.doActionWithBucket("ListObjects", HTTP_GET, input.Bucket, input, output, extensions)
+	if err != nil {
+		output = nil
+	} else {
+		if location, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok {
+			output.Location = location[0]
+		}
+	}
+	return
+}
+
+// ListVersions lists versioning objects in a bucket.
+//
+// You can use this API to list versioning objects in a bucket. By default, a maximum of 1000 versioning objects are listed.
+func (obsClient ObsClient) ListVersions(input *ListVersionsInput, extensions ...extensionOptions) (output *ListVersionsOutput, err error) {
+	if input == nil {
+		return nil, errors.New("ListVersionsInput is nil")
+	}
+	output = &ListVersionsOutput{}
+	err = obsClient.doActionWithBucket("ListVersions", HTTP_GET, input.Bucket, input, output, extensions)
+	if err != nil {
+		output = nil
+	} else {
+		if location, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok {
+			output.Location = location[0]
+		}
+	}
+	return
+}
+
+// ListMultipartUploads lists the multipart uploads.
+//
+// You can use this API to list the multipart uploads that are initialized but not combined or aborted in a specified bucket.
+func (obsClient ObsClient) ListMultipartUploads(input *ListMultipartUploadsInput, extensions ...extensionOptions) (output *ListMultipartUploadsOutput, err error) {
+	if input == nil {
+		return nil, errors.New("ListMultipartUploadsInput is nil")
+	}
+	output = &ListMultipartUploadsOutput{}
+	err = obsClient.doActionWithBucket("ListMultipartUploads", HTTP_GET, input.Bucket, input, output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// SetBucketQuota sets the bucket quota.
+//
+// You can use this API to set the bucket quota. A bucket quota must be expressed in bytes and the maximum value is 2^63-1.
+func (obsClient ObsClient) SetBucketQuota(input *SetBucketQuotaInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("SetBucketQuotaInput is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("SetBucketQuota", HTTP_PUT, input.Bucket, input, output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// GetBucketQuota gets the bucket quota.
+//
+// You can use this API to obtain the bucket quota. Value 0 indicates that no upper limit is set for the bucket quota.
+func (obsClient ObsClient) GetBucketQuota(bucketName string, extensions ...extensionOptions) (output *GetBucketQuotaOutput, err error) {
+	output = &GetBucketQuotaOutput{}
+	err = obsClient.doActionWithBucket("GetBucketQuota", HTTP_GET, bucketName, newSubResourceSerial(SubResourceQuota), output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// HeadBucket checks whether a bucket exists.
+//
+// You can use this API to check whether a bucket exists.
+func (obsClient ObsClient) HeadBucket(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("HeadBucket", HTTP_HEAD, bucketName, defaultSerializable, output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// HeadObject checks whether an object exists.
+//
+// You can use this API to check whether an object exists.
+func (obsClient ObsClient) HeadObject(input *HeadObjectInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("HeadObjectInput is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucketAndKey("HeadObject", HTTP_HEAD, input.Bucket, input.Key, input, output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// GetBucketMetadata gets the metadata of a bucket.
+//
+// You can use this API to send a HEAD request to a bucket to obtain the bucket
+// metadata such as the storage class and CORS rules (if set).
+func (obsClient ObsClient) GetBucketMetadata(input *GetBucketMetadataInput, extensions ...extensionOptions) (output *GetBucketMetadataOutput, err error) {
+	output = &GetBucketMetadataOutput{}
+	err = obsClient.doActionWithBucket("GetBucketMetadata", HTTP_HEAD, input.Bucket, input, output, extensions)
+	if err != nil {
+		output = nil
+	} else {
+		ParseGetBucketMetadataOutput(output)
+	}
+	return
+}
+func (obsClient ObsClient) SetObjectMetadata(input *SetObjectMetadataInput, extensions ...extensionOptions) (output *SetObjectMetadataOutput, err error) { + output = &SetObjectMetadataOutput{} + err = obsClient.doActionWithBucketAndKey("SetObjectMetadata", HTTP_PUT, input.Bucket, input.Key, input, output, extensions) + if err != nil { + output = nil + } else { + ParseSetObjectMetadataOutput(output) + } + return +} + +// GetBucketStorageInfo gets storage information about a bucket. +// +// You can use this API to obtain storage information about a bucket, including the +// bucket size and number of objects in the bucket. +func (obsClient ObsClient) GetBucketStorageInfo(bucketName string, extensions ...extensionOptions) (output *GetBucketStorageInfoOutput, err error) { + output = &GetBucketStorageInfoOutput{} + err = obsClient.doActionWithBucket("GetBucketStorageInfo", HTTP_GET, bucketName, newSubResourceSerial(SubResourceStorageInfo), output, extensions) + if err != nil { + output = nil + } + return +} + +func (obsClient ObsClient) getBucketLocationS3(bucketName string, extensions []extensionOptions) (output *GetBucketLocationOutput, err error) { + output = &GetBucketLocationOutput{} + var outputS3 *getBucketLocationOutputS3 + outputS3 = &getBucketLocationOutputS3{} + err = obsClient.doActionWithBucket("GetBucketLocation", HTTP_GET, bucketName, newSubResourceSerial(SubResourceLocation), outputS3, extensions) + if err != nil { + output = nil + } else { + output.BaseModel = outputS3.BaseModel + output.Location = outputS3.Location + } + return +} +func (obsClient ObsClient) getBucketLocationObs(bucketName string, extensions []extensionOptions) (output *GetBucketLocationOutput, err error) { + output = &GetBucketLocationOutput{} + var outputObs *getBucketLocationOutputObs + outputObs = &getBucketLocationOutputObs{} + err = obsClient.doActionWithBucket("GetBucketLocation", HTTP_GET, bucketName, newSubResourceSerial(SubResourceLocation), outputObs, extensions) + if err != nil { + output = nil + } else { + output.BaseModel = outputObs.BaseModel + output.Location = outputObs.Location + } + return +} + +// GetBucketLocation gets the location of a bucket. +// +// You can use this API to obtain the bucket location. +func (obsClient ObsClient) GetBucketLocation(bucketName string, extensions ...extensionOptions) (output *GetBucketLocationOutput, err error) { + if obsClient.conf.signature == SignatureObs { + return obsClient.getBucketLocationObs(bucketName, extensions) + } + return obsClient.getBucketLocationS3(bucketName, extensions) +} + +// SetBucketAcl sets the bucket ACL. +// +// You can use this API to set the ACL for a bucket. 
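+//
+// An illustrative sketch (bucket name and client are placeholders; AclPrivate
+// is assumed to be one of the SDK's canned ACL constants):
+//
+//	input := &obs.SetBucketAclInput{}
+//	input.Bucket = "my-bucket"
+//	input.ACL = obs.AclPrivate
+//	if _, err := client.SetBucketAcl(input); err != nil {
+//		// handle the request error
+//	}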
+func (obsClient ObsClient) SetBucketAcl(input *SetBucketAclInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("SetBucketAclInput is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucket("SetBucketAcl", HTTP_PUT, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } + return +} +func (obsClient ObsClient) getBucketACLObs(bucketName string, extensions []extensionOptions) (output *GetBucketAclOutput, err error) { + output = &GetBucketAclOutput{} + var outputObs *getBucketACLOutputObs + outputObs = &getBucketACLOutputObs{} + err = obsClient.doActionWithBucket("GetBucketAcl", HTTP_GET, bucketName, newSubResourceSerial(SubResourceAcl), outputObs, extensions) + if err != nil { + output = nil + } else { + output.BaseModel = outputObs.BaseModel + output.Owner = outputObs.Owner + output.Grants = make([]Grant, 0, len(outputObs.Grants)) + for _, valGrant := range outputObs.Grants { + tempOutput := Grant{} + tempOutput.Delivered = valGrant.Delivered + tempOutput.Permission = valGrant.Permission + tempOutput.Grantee.DisplayName = valGrant.Grantee.DisplayName + tempOutput.Grantee.ID = valGrant.Grantee.ID + tempOutput.Grantee.Type = valGrant.Grantee.Type + tempOutput.Grantee.URI = GroupAllUsers + + output.Grants = append(output.Grants, tempOutput) + } + } + return +} + +// GetBucketAcl gets the bucket ACL. +// +// You can use this API to obtain a bucket ACL. +func (obsClient ObsClient) GetBucketAcl(bucketName string, extensions ...extensionOptions) (output *GetBucketAclOutput, err error) { + output = &GetBucketAclOutput{} + if obsClient.conf.signature == SignatureObs { + return obsClient.getBucketACLObs(bucketName, extensions) + } + err = obsClient.doActionWithBucket("GetBucketAcl", HTTP_GET, bucketName, newSubResourceSerial(SubResourceAcl), output, extensions) + if err != nil { + output = nil + } + return +} + +// SetBucketPolicy sets the bucket policy. +// +// You can use this API to set a bucket policy. If the bucket already has a policy, the +// policy will be overwritten by the one specified in this request. +func (obsClient ObsClient) SetBucketPolicy(input *SetBucketPolicyInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("SetBucketPolicy is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucket("SetBucketPolicy", HTTP_PUT, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// GetBucketPolicy gets the bucket policy. +// +// You can use this API to obtain the policy of a bucket. +func (obsClient ObsClient) GetBucketPolicy(bucketName string, extensions ...extensionOptions) (output *GetBucketPolicyOutput, err error) { + output = &GetBucketPolicyOutput{} + err = obsClient.doActionWithBucketV2("GetBucketPolicy", HTTP_GET, bucketName, newSubResourceSerial(SubResourcePolicy), output, extensions) + if err != nil { + output = nil + } + return +} + +// DeleteBucketPolicy deletes the bucket policy. +// +// You can use this API to delete the policy of a bucket. +func (obsClient ObsClient) DeleteBucketPolicy(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doActionWithBucket("DeleteBucketPolicy", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourcePolicy), output, extensions) + if err != nil { + output = nil + } + return +} + +// SetBucketCors sets CORS rules for a bucket. 
+// +// You can use this API to set CORS rules for a bucket to allow client browsers to send cross-origin requests. +func (obsClient ObsClient) SetBucketCors(input *SetBucketCorsInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("SetBucketCorsInput is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucket("SetBucketCors", HTTP_PUT, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// GetBucketCors gets CORS rules of a bucket. +// +// You can use this API to obtain the CORS rules of a specified bucket. +func (obsClient ObsClient) GetBucketCors(bucketName string, extensions ...extensionOptions) (output *GetBucketCorsOutput, err error) { + output = &GetBucketCorsOutput{} + err = obsClient.doActionWithBucket("GetBucketCors", HTTP_GET, bucketName, newSubResourceSerial(SubResourceCors), output, extensions) + if err != nil { + output = nil + } + return +} + +// DeleteBucketCors deletes CORS rules of a bucket. +// +// You can use this API to delete the CORS rules of a specified bucket. +func (obsClient ObsClient) DeleteBucketCors(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doActionWithBucket("DeleteBucketCors", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceCors), output, extensions) + if err != nil { + output = nil + } + return +} + +// SetBucketVersioning sets the versioning status for a bucket. +// +// You can use this API to set the versioning status for a bucket. +func (obsClient ObsClient) SetBucketVersioning(input *SetBucketVersioningInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("SetBucketVersioningInput is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucket("SetBucketVersioning", HTTP_PUT, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// GetBucketVersioning gets the versioning status of a bucket. +// +// You can use this API to obtain the versioning status of a bucket. +func (obsClient ObsClient) GetBucketVersioning(bucketName string, extensions ...extensionOptions) (output *GetBucketVersioningOutput, err error) { + output = &GetBucketVersioningOutput{} + err = obsClient.doActionWithBucket("GetBucketVersioning", HTTP_GET, bucketName, newSubResourceSerial(SubResourceVersioning), output, extensions) + if err != nil { + output = nil + } + return +} + +// SetBucketWebsiteConfiguration sets website hosting for a bucket. +// +// You can use this API to set website hosting for a bucket. +func (obsClient ObsClient) SetBucketWebsiteConfiguration(input *SetBucketWebsiteConfigurationInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("SetBucketWebsiteConfigurationInput is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucket("SetBucketWebsiteConfiguration", HTTP_PUT, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// GetBucketWebsiteConfiguration gets the website hosting settings of a bucket. +// +// You can use this API to obtain the website hosting settings of a bucket. 
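+//
+// An illustrative sketch (bucket name and client are placeholders; RequestId
+// comes from the embedded BaseModel):
+//
+//	output, err := client.GetBucketWebsiteConfiguration("my-bucket")
+//	if err == nil {
+//		fmt.Printf("request id: %s\n", output.RequestId)
+//	}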
+func (obsClient ObsClient) GetBucketWebsiteConfiguration(bucketName string, extensions ...extensionOptions) (output *GetBucketWebsiteConfigurationOutput, err error) {
+	output = &GetBucketWebsiteConfigurationOutput{}
+	err = obsClient.doActionWithBucket("GetBucketWebsiteConfiguration", HTTP_GET, bucketName, newSubResourceSerial(SubResourceWebsite), output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// DeleteBucketWebsiteConfiguration deletes the website hosting settings of a bucket.
+//
+// You can use this API to delete the website hosting settings of a bucket.
+func (obsClient ObsClient) DeleteBucketWebsiteConfiguration(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("DeleteBucketWebsiteConfiguration", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceWebsite), output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// SetBucketLoggingConfiguration sets the bucket logging.
+//
+// You can use this API to configure access logging for a bucket.
+func (obsClient ObsClient) SetBucketLoggingConfiguration(input *SetBucketLoggingConfigurationInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("SetBucketLoggingConfigurationInput is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("SetBucketLoggingConfiguration", HTTP_PUT, input.Bucket, input, output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// GetBucketLoggingConfiguration gets the logging settings of a bucket.
+//
+// You can use this API to obtain the access logging settings of a bucket.
+func (obsClient ObsClient) GetBucketLoggingConfiguration(bucketName string, extensions ...extensionOptions) (output *GetBucketLoggingConfigurationOutput, err error) {
+	output = &GetBucketLoggingConfigurationOutput{}
+	err = obsClient.doActionWithBucket("GetBucketLoggingConfiguration", HTTP_GET, bucketName, newSubResourceSerial(SubResourceLogging), output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// SetBucketLifecycleConfiguration sets lifecycle rules for a bucket.
+//
+// You can use this API to set lifecycle rules for a bucket, to periodically transition
+// storage classes of objects and delete objects in the bucket.
+func (obsClient ObsClient) SetBucketLifecycleConfiguration(input *SetBucketLifecycleConfigurationInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("SetBucketLifecycleConfigurationInput is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("SetBucketLifecycleConfiguration", HTTP_PUT, input.Bucket, input, output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// GetBucketLifecycleConfiguration gets lifecycle rules of a bucket.
+//
+// You can use this API to obtain the lifecycle rules of a bucket.
+func (obsClient ObsClient) GetBucketLifecycleConfiguration(bucketName string, extensions ...extensionOptions) (output *GetBucketLifecycleConfigurationOutput, err error) {
+	output = &GetBucketLifecycleConfigurationOutput{}
+	err = obsClient.doActionWithBucket("GetBucketLifecycleConfiguration", HTTP_GET, bucketName, newSubResourceSerial(SubResourceLifecycle), output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// DeleteBucketLifecycleConfiguration deletes lifecycle rules of a bucket.
+//
+// You can use this API to delete all lifecycle rules of a bucket.
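+//
+// An illustrative sketch (bucket name and client are placeholders):
+//
+//	if _, err := client.DeleteBucketLifecycleConfiguration("my-bucket"); err != nil {
+//		// handle the request error
+//	}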
+func (obsClient ObsClient) DeleteBucketLifecycleConfiguration(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doActionWithBucket("DeleteBucketLifecycleConfiguration", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceLifecycle), output, extensions) + if err != nil { + output = nil + } + return +} + +// SetBucketTagging sets bucket tags. +// +// You can use this API to set bucket tags. +func (obsClient ObsClient) SetBucketTagging(input *SetBucketTaggingInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("SetBucketTaggingInput is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucket("SetBucketTagging", HTTP_PUT, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// GetBucketTagging gets bucket tags. +// +// You can use this API to obtain the tags of a specified bucket. +func (obsClient ObsClient) GetBucketTagging(bucketName string, extensions ...extensionOptions) (output *GetBucketTaggingOutput, err error) { + output = &GetBucketTaggingOutput{} + err = obsClient.doActionWithBucket("GetBucketTagging", HTTP_GET, bucketName, newSubResourceSerial(SubResourceTagging), output, extensions) + if err != nil { + output = nil + } + return +} + +// DeleteBucketTagging deletes bucket tags. +// +// You can use this API to delete the tags of a specified bucket. +func (obsClient ObsClient) DeleteBucketTagging(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doActionWithBucket("DeleteBucketTagging", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceTagging), output, extensions) + if err != nil { + output = nil + } + return +} + +// SetBucketNotification sets event notification for a bucket. +// +// You can use this API to configure event notification for a bucket. You will be notified of all +// specified operations performed on the bucket. +func (obsClient ObsClient) SetBucketNotification(input *SetBucketNotificationInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("SetBucketNotificationInput is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucket("SetBucketNotification", HTTP_PUT, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// GetBucketNotification gets event notification settings of a bucket. +// +// You can use this API to obtain the event notification configuration of a bucket. 
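+//
+// An illustrative sketch (bucket name and client are placeholders); the
+// returned TopicConfigurations can be walked directly:
+//
+//	output, err := client.GetBucketNotification("my-bucket")
+//	if err == nil {
+//		for _, tc := range output.TopicConfigurations {
+//			fmt.Printf("id=%s topic=%s\n", tc.ID, tc.Topic)
+//		}
+//	}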
+func (obsClient ObsClient) GetBucketNotification(bucketName string, extensions ...extensionOptions) (output *GetBucketNotificationOutput, err error) { + if obsClient.conf.signature != SignatureObs { + return obsClient.getBucketNotificationS3(bucketName, extensions) + } + output = &GetBucketNotificationOutput{} + err = obsClient.doActionWithBucket("GetBucketNotification", HTTP_GET, bucketName, newSubResourceSerial(SubResourceNotification), output, extensions) + if err != nil { + output = nil + } + return +} + +func (obsClient ObsClient) getBucketNotificationS3(bucketName string, extensions []extensionOptions) (output *GetBucketNotificationOutput, err error) { + outputS3 := &getBucketNotificationOutputS3{} + err = obsClient.doActionWithBucket("GetBucketNotification", HTTP_GET, bucketName, newSubResourceSerial(SubResourceNotification), outputS3, extensions) + if err != nil { + return nil, err + } + + output = &GetBucketNotificationOutput{} + output.BaseModel = outputS3.BaseModel + topicConfigurations := make([]TopicConfiguration, 0, len(outputS3.TopicConfigurations)) + for _, topicConfigurationS3 := range outputS3.TopicConfigurations { + topicConfiguration := TopicConfiguration{} + topicConfiguration.ID = topicConfigurationS3.ID + topicConfiguration.Topic = topicConfigurationS3.Topic + topicConfiguration.FilterRules = topicConfigurationS3.FilterRules + + events := make([]EventType, 0, len(topicConfigurationS3.Events)) + for _, event := range topicConfigurationS3.Events { + events = append(events, ParseStringToEventType(event)) + } + topicConfiguration.Events = events + topicConfigurations = append(topicConfigurations, topicConfiguration) + } + output.TopicConfigurations = topicConfigurations + return +} + +// DeleteObject deletes an object. +// +// You can use this API to delete an object from a specified bucket. +func (obsClient ObsClient) DeleteObject(input *DeleteObjectInput, extensions ...extensionOptions) (output *DeleteObjectOutput, err error) { + if input == nil { + return nil, errors.New("DeleteObjectInput is nil") + } + output = &DeleteObjectOutput{} + err = obsClient.doActionWithBucketAndKey("DeleteObject", HTTP_DELETE, input.Bucket, input.Key, input, output, extensions) + if err != nil { + output = nil + } else { + ParseDeleteObjectOutput(output) + } + return +} + +// DeleteObjects deletes objects in a batch. +// +// You can use this API to batch delete objects from a specified bucket. +func (obsClient ObsClient) DeleteObjects(input *DeleteObjectsInput, extensions ...extensionOptions) (output *DeleteObjectsOutput, err error) { + if input == nil { + return nil, errors.New("DeleteObjectsInput is nil") + } + output = &DeleteObjectsOutput{} + err = obsClient.doActionWithBucket("DeleteObjects", HTTP_POST, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// SetObjectAcl sets ACL for an object. +// +// You can use this API to set the ACL for an object in a specified bucket. +func (obsClient ObsClient) SetObjectAcl(input *SetObjectAclInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("SetObjectAclInput is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucketAndKey("SetObjectAcl", HTTP_PUT, input.Bucket, input.Key, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// GetObjectAcl gets the ACL of an object. +// +// You can use this API to obtain the ACL of an object in a specified bucket. 
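+//
+// An illustrative sketch (bucket, key and client are placeholders):
+//
+//	input := &obs.GetObjectAclInput{}
+//	input.Bucket = "my-bucket"
+//	input.Key = "docs/readme.txt"
+//	output, err := client.GetObjectAcl(input)
+//	if err == nil {
+//		for _, grant := range output.Grants {
+//			fmt.Printf("grantee=%s permission=%s\n", grant.Grantee.ID, grant.Permission)
+//		}
+//	}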
+func (obsClient ObsClient) GetObjectAcl(input *GetObjectAclInput, extensions ...extensionOptions) (output *GetObjectAclOutput, err error) {
+	if input == nil {
+		return nil, errors.New("GetObjectAclInput is nil")
+	}
+	output = &GetObjectAclOutput{}
+	err = obsClient.doActionWithBucketAndKey("GetObjectAcl", HTTP_GET, input.Bucket, input.Key, input, output, extensions)
+	if err != nil {
+		output = nil
+	} else {
+		if versionID, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok {
+			output.VersionId = versionID[0]
+		}
+	}
+	return
+}
+
+// RestoreObject restores an object.
+func (obsClient ObsClient) RestoreObject(input *RestoreObjectInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("RestoreObjectInput is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucketAndKey("RestoreObject", HTTP_POST, input.Bucket, input.Key, input, output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// GetObjectMetadata gets object metadata.
+//
+// You can use this API to send a HEAD request to the object of a specified bucket to obtain its metadata.
+func (obsClient ObsClient) GetObjectMetadata(input *GetObjectMetadataInput, extensions ...extensionOptions) (output *GetObjectMetadataOutput, err error) {
+	if input == nil {
+		return nil, errors.New("GetObjectMetadataInput is nil")
+	}
+	output = &GetObjectMetadataOutput{}
+	err = obsClient.doActionWithBucketAndKey("GetObjectMetadata", HTTP_HEAD, input.Bucket, input.Key, input, output, extensions)
+	if err != nil {
+		output = nil
+	} else {
+		ParseGetObjectMetadataOutput(output)
+	}
+	return
+}
+
+// GetObject downloads an object.
+//
+// You can use this API to download an object in a specified bucket.
+func (obsClient ObsClient) GetObject(input *GetObjectInput, extensions ...extensionOptions) (output *GetObjectOutput, err error) {
+	if input == nil {
+		return nil, errors.New("GetObjectInput is nil")
+	}
+	output = &GetObjectOutput{}
+	err = obsClient.doActionWithBucketAndKey("GetObject", HTTP_GET, input.Bucket, input.Key, input, output, extensions)
+	if err != nil {
+		output = nil
+	} else {
+		ParseGetObjectOutput(output)
+	}
+	return
+}
+
+// PutObject uploads an object to the specified bucket.
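+//
+// An illustrative sketch (bucket, key, payload and client are placeholders).
+// Body may be any io.Reader; per the retry logic below, only a *strings.Reader
+// body is treated as repeatable:
+//
+//	input := &obs.PutObjectInput{}
+//	input.Bucket = "my-bucket"
+//	input.Key = "docs/hello.txt"
+//	input.Body = strings.NewReader("hello obs")
+//	output, err := client.PutObject(input)
+//	if err == nil {
+//		fmt.Printf("etag=%s\n", output.ETag)
+//	}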
+func (obsClient ObsClient) PutObject(input *PutObjectInput, extensions ...extensionOptions) (output *PutObjectOutput, err error) { + if input == nil { + return nil, errors.New("PutObjectInput is nil") + } + + if input.ContentType == "" && input.Key != "" { + if contentType, ok := mimeTypes[strings.ToLower(input.Key[strings.LastIndex(input.Key, ".")+1:])]; ok { + input.ContentType = contentType + } + } + output = &PutObjectOutput{} + var repeatable bool + if input.Body != nil { + _, repeatable = input.Body.(*strings.Reader) + if input.ContentLength > 0 { + input.Body = &readerWrapper{reader: input.Body, totalCount: input.ContentLength} + } + } + if repeatable { + err = obsClient.doActionWithBucketAndKey("PutObject", HTTP_PUT, input.Bucket, input.Key, input, output, extensions) + } else { + err = obsClient.doActionWithBucketAndKeyUnRepeatable("PutObject", HTTP_PUT, input.Bucket, input.Key, input, output, extensions) + } + if err != nil { + output = nil + } else { + ParsePutObjectOutput(output) + } + return +} + +func (obsClient ObsClient) getContentType(input *PutObjectInput, sourceFile string) (contentType string) { + if contentType, ok := mimeTypes[strings.ToLower(input.Key[strings.LastIndex(input.Key, ".")+1:])]; ok { + return contentType + } + if contentType, ok := mimeTypes[strings.ToLower(sourceFile[strings.LastIndex(sourceFile, ".")+1:])]; ok { + return contentType + } + return +} + +func (obsClient ObsClient) isGetContentType(input *PutObjectInput) bool { + if input.ContentType == "" && input.Key != "" { + return true + } + return false +} + +// PutFile uploads a file to the specified bucket. +func (obsClient ObsClient) PutFile(input *PutFileInput, extensions ...extensionOptions) (output *PutObjectOutput, err error) { + if input == nil { + return nil, errors.New("PutFileInput is nil") + } + + var body io.Reader + sourceFile := strings.TrimSpace(input.SourceFile) + if sourceFile != "" { + fd, _err := os.Open(sourceFile) + if _err != nil { + err = _err + return nil, err + } + defer func() { + errMsg := fd.Close() + if errMsg != nil { + doLog(LEVEL_WARN, "Failed to close file with reason: %v", errMsg) + } + }() + + stat, _err := fd.Stat() + if _err != nil { + err = _err + return nil, err + } + fileReaderWrapper := &fileReaderWrapper{filePath: sourceFile} + fileReaderWrapper.reader = fd + if input.ContentLength > 0 { + if input.ContentLength > stat.Size() { + input.ContentLength = stat.Size() + } + fileReaderWrapper.totalCount = input.ContentLength + } else { + fileReaderWrapper.totalCount = stat.Size() + } + body = fileReaderWrapper + } + + _input := &PutObjectInput{} + _input.PutObjectBasicInput = input.PutObjectBasicInput + _input.Body = body + + if obsClient.isGetContentType(_input) { + _input.ContentType = obsClient.getContentType(_input, sourceFile) + } + + output = &PutObjectOutput{} + err = obsClient.doActionWithBucketAndKey("PutFile", HTTP_PUT, _input.Bucket, _input.Key, _input, output, extensions) + if err != nil { + output = nil + } else { + ParsePutObjectOutput(output) + } + return +} + +// CopyObject creates a copy for an existing object. +// +// You can use this API to create a copy for an object in a specified bucket. 
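+//
+// An illustrative sketch (all names are placeholders); the destination is
+// addressed by Bucket/Key, the source by CopySourceBucket/CopySourceKey:
+//
+//	input := &obs.CopyObjectInput{}
+//	input.Bucket = "my-bucket"
+//	input.Key = "backup/hello.txt"
+//	input.CopySourceBucket = "my-bucket"
+//	input.CopySourceKey = "docs/hello.txt"
+//	if _, err := client.CopyObject(input); err != nil {
+//		// handle the request error
+//	}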
+func (obsClient ObsClient) CopyObject(input *CopyObjectInput, extensions ...extensionOptions) (output *CopyObjectOutput, err error) { + if input == nil { + return nil, errors.New("CopyObjectInput is nil") + } + + if strings.TrimSpace(input.CopySourceBucket) == "" { + return nil, errors.New("Source bucket is empty") + } + if strings.TrimSpace(input.CopySourceKey) == "" { + return nil, errors.New("Source key is empty") + } + + output = &CopyObjectOutput{} + err = obsClient.doActionWithBucketAndKey("CopyObject", HTTP_PUT, input.Bucket, input.Key, input, output, extensions) + if err != nil { + output = nil + } else { + ParseCopyObjectOutput(output) + } + return +} + +// AbortMultipartUpload aborts a multipart upload in a specified bucket by using the multipart upload ID. +func (obsClient ObsClient) AbortMultipartUpload(input *AbortMultipartUploadInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("AbortMultipartUploadInput is nil") + } + if input.UploadId == "" { + return nil, errors.New("UploadId is empty") + } + output = &BaseModel{} + err = obsClient.doActionWithBucketAndKey("AbortMultipartUpload", HTTP_DELETE, input.Bucket, input.Key, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// InitiateMultipartUpload initializes a multipart upload. +func (obsClient ObsClient) InitiateMultipartUpload(input *InitiateMultipartUploadInput, extensions ...extensionOptions) (output *InitiateMultipartUploadOutput, err error) { + if input == nil { + return nil, errors.New("InitiateMultipartUploadInput is nil") + } + + if input.ContentType == "" && input.Key != "" { + if contentType, ok := mimeTypes[strings.ToLower(input.Key[strings.LastIndex(input.Key, ".")+1:])]; ok { + input.ContentType = contentType + } + } + + output = &InitiateMultipartUploadOutput{} + err = obsClient.doActionWithBucketAndKey("InitiateMultipartUpload", HTTP_POST, input.Bucket, input.Key, input, output, extensions) + if err != nil { + output = nil + } else { + ParseInitiateMultipartUploadOutput(output) + } + return +} + +// UploadPart uploads a part to a specified bucket by using a specified multipart upload ID. +// +// After a multipart upload is initialized, you can use this API to upload a part to a specified bucket +// by using the multipart upload ID. Except for the last uploaded part whose size ranges from 0 to 5 GB, +// sizes of the other parts range from 100 KB to 5 GB. The upload part ID ranges from 1 to 10000. 
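+//
+// An illustrative end-to-end sketch of the multipart flow (all names are
+// placeholders and error handling is elided for brevity):
+//
+//	init := &obs.InitiateMultipartUploadInput{}
+//	init.Bucket, init.Key = "my-bucket", "big/archive.zip"
+//	initOut, _ := client.InitiateMultipartUpload(init)
+//
+//	part := &obs.UploadPartInput{}
+//	part.Bucket, part.Key = init.Bucket, init.Key
+//	part.UploadId = initOut.UploadId
+//	part.PartNumber = 1
+//	part.SourceFile = "/tmp/archive.zip" // alternatively set Body, Offset and PartSize
+//	partOut, _ := client.UploadPart(part)
+//
+//	complete := &obs.CompleteMultipartUploadInput{}
+//	complete.Bucket, complete.Key = init.Bucket, init.Key
+//	complete.UploadId = initOut.UploadId
+//	complete.Parts = []obs.Part{{PartNumber: partOut.PartNumber, ETag: partOut.ETag}}
+//	_, _ = client.CompleteMultipartUpload(complete)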
+func (obsClient ObsClient) UploadPart(_input *UploadPartInput, extensions ...extensionOptions) (output *UploadPartOutput, err error) { + if _input == nil { + return nil, errors.New("UploadPartInput is nil") + } + + if _input.UploadId == "" { + return nil, errors.New("UploadId is empty") + } + + input := &UploadPartInput{} + input.Bucket = _input.Bucket + input.Key = _input.Key + input.PartNumber = _input.PartNumber + input.UploadId = _input.UploadId + input.ContentMD5 = _input.ContentMD5 + input.SourceFile = _input.SourceFile + input.Offset = _input.Offset + input.PartSize = _input.PartSize + input.SseHeader = _input.SseHeader + input.Body = _input.Body + + output = &UploadPartOutput{} + var repeatable bool + if input.Body != nil { + _, repeatable = input.Body.(*strings.Reader) + if _, ok := input.Body.(*readerWrapper); !ok && input.PartSize > 0 { + input.Body = &readerWrapper{reader: input.Body, totalCount: input.PartSize} + } + } else if sourceFile := strings.TrimSpace(input.SourceFile); sourceFile != "" { + fd, _err := os.Open(sourceFile) + if _err != nil { + err = _err + return nil, err + } + defer func() { + errMsg := fd.Close() + if errMsg != nil { + doLog(LEVEL_WARN, "Failed to close file with reason: %v", errMsg) + } + }() + + stat, _err := fd.Stat() + if _err != nil { + err = _err + return nil, err + } + fileSize := stat.Size() + fileReaderWrapper := &fileReaderWrapper{filePath: sourceFile} + fileReaderWrapper.reader = fd + + if input.Offset < 0 || input.Offset > fileSize { + input.Offset = 0 + } + + if input.PartSize <= 0 || input.PartSize > (fileSize-input.Offset) { + input.PartSize = fileSize - input.Offset + } + fileReaderWrapper.totalCount = input.PartSize + if _, err = fd.Seek(input.Offset, io.SeekStart); err != nil { + return nil, err + } + input.Body = fileReaderWrapper + repeatable = true + } + if repeatable { + err = obsClient.doActionWithBucketAndKey("UploadPart", HTTP_PUT, input.Bucket, input.Key, input, output, extensions) + } else { + err = obsClient.doActionWithBucketAndKeyUnRepeatable("UploadPart", HTTP_PUT, input.Bucket, input.Key, input, output, extensions) + } + if err != nil { + output = nil + } else { + ParseUploadPartOutput(output) + output.PartNumber = input.PartNumber + } + return +} + +// CompleteMultipartUpload combines the uploaded parts in a specified bucket by using the multipart upload ID. +func (obsClient ObsClient) CompleteMultipartUpload(input *CompleteMultipartUploadInput, extensions ...extensionOptions) (output *CompleteMultipartUploadOutput, err error) { + if input == nil { + return nil, errors.New("CompleteMultipartUploadInput is nil") + } + + if input.UploadId == "" { + return nil, errors.New("UploadId is empty") + } + + var parts partSlice = input.Parts + sort.Sort(parts) + + output = &CompleteMultipartUploadOutput{} + err = obsClient.doActionWithBucketAndKey("CompleteMultipartUpload", HTTP_POST, input.Bucket, input.Key, input, output, extensions) + if err != nil { + output = nil + } else { + ParseCompleteMultipartUploadOutput(output) + } + return +} + +// ListParts lists the uploaded parts in a bucket by using the multipart upload ID. 
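+//
+// An illustrative sketch (names are placeholders; uploadId comes from a prior
+// InitiateMultipartUpload call):
+//
+//	input := &obs.ListPartsInput{}
+//	input.Bucket = "my-bucket"
+//	input.Key = "big/archive.zip"
+//	input.UploadId = uploadId
+//	output, err := client.ListParts(input)
+//	if err == nil {
+//		fmt.Printf("parts so far: %d\n", len(output.Parts))
+//	}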
+func (obsClient ObsClient) ListParts(input *ListPartsInput, extensions ...extensionOptions) (output *ListPartsOutput, err error) {
+	if input == nil {
+		return nil, errors.New("ListPartsInput is nil")
+	}
+	if input.UploadId == "" {
+		return nil, errors.New("UploadId is empty")
+	}
+	output = &ListPartsOutput{}
+	err = obsClient.doActionWithBucketAndKey("ListParts", HTTP_GET, input.Bucket, input.Key, input, output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// CopyPart copies a part to a specified bucket by using a specified multipart upload ID.
+//
+// After a multipart upload is initialized, you can use this API to copy a part to a specified bucket by using the multipart upload ID.
+func (obsClient ObsClient) CopyPart(input *CopyPartInput, extensions ...extensionOptions) (output *CopyPartOutput, err error) {
+	if input == nil {
+		return nil, errors.New("CopyPartInput is nil")
+	}
+	if input.UploadId == "" {
+		return nil, errors.New("UploadId is empty")
+	}
+	if strings.TrimSpace(input.CopySourceBucket) == "" {
+		return nil, errors.New("Source bucket is empty")
+	}
+	if strings.TrimSpace(input.CopySourceKey) == "" {
+		return nil, errors.New("Source key is empty")
+	}
+
+	output = &CopyPartOutput{}
+	err = obsClient.doActionWithBucketAndKey("CopyPart", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
+	if err != nil {
+		output = nil
+	} else {
+		ParseCopyPartOutput(output)
+		output.PartNumber = input.PartNumber
+	}
+	return
+}
+
+// SetBucketRequestPayment sets the requester-pays setting for a bucket.
+func (obsClient ObsClient) SetBucketRequestPayment(input *SetBucketRequestPaymentInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("SetBucketRequestPaymentInput is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("SetBucketRequestPayment", HTTP_PUT, input.Bucket, input, output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// GetBucketRequestPayment gets the requester-pays setting of a bucket.
+func (obsClient ObsClient) GetBucketRequestPayment(bucketName string, extensions ...extensionOptions) (output *GetBucketRequestPaymentOutput, err error) {
+	output = &GetBucketRequestPaymentOutput{}
+	err = obsClient.doActionWithBucket("GetBucketRequestPayment", HTTP_GET, bucketName, newSubResourceSerial(SubResourceRequestPayment), output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// UploadFile performs a resumable upload.
+//
+// This API is an encapsulated and enhanced version of multipart upload, and aims to eliminate large file
+// upload failures caused by poor network conditions and program breakdowns.
+func (obsClient ObsClient) UploadFile(input *UploadFileInput, extensions ...extensionOptions) (output *CompleteMultipartUploadOutput, err error) {
+	if input.EnableCheckpoint && input.CheckpointFile == "" {
+		input.CheckpointFile = input.UploadFile + ".uploadfile_record"
+	}
+
+	if input.TaskNum <= 0 {
+		input.TaskNum = 1
+	}
+	if input.PartSize < MIN_PART_SIZE {
+		input.PartSize = MIN_PART_SIZE
+	} else if input.PartSize > MAX_PART_SIZE {
+		input.PartSize = MAX_PART_SIZE
+	}
+
+	output, err = obsClient.resumeUpload(input, extensions)
+	return
+}
+
+// DownloadFile performs a resumable download.
+//
+// This API is an encapsulated and enhanced version of partial download, and aims to eliminate large file
+// download failures caused by poor network conditions and program breakdowns.
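+//
+// An illustrative sketch (names are placeholders); with EnableCheckpoint set,
+// an interrupted download can be resumed from the checkpoint file:
+//
+//	input := &obs.DownloadFileInput{}
+//	input.Bucket = "my-bucket"
+//	input.Key = "big/dataset.zip"
+//	input.DownloadFile = "/tmp/dataset.zip"
+//	input.EnableCheckpoint = true
+//	if _, err := client.DownloadFile(input); err != nil {
+//		// handle the request error
+//	}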
+func (obsClient ObsClient) DownloadFile(input *DownloadFileInput, extensions ...extensionOptions) (output *GetObjectMetadataOutput, err error) { + if input.DownloadFile == "" { + input.DownloadFile = input.Key + } + + if input.EnableCheckpoint && input.CheckpointFile == "" { + input.CheckpointFile = input.DownloadFile + ".downloadfile_record" + } + + if input.TaskNum <= 0 { + input.TaskNum = 1 + } + if input.PartSize <= 0 { + input.PartSize = DEFAULT_PART_SIZE + } + + output, err = obsClient.resumeDownload(input, extensions) + return +} + +// SetBucketFetchPolicy sets the bucket fetch policy. +// +// You can use this API to set a bucket fetch policy. +func (obsClient ObsClient) SetBucketFetchPolicy(input *SetBucketFetchPolicyInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("SetBucketFetchPolicyInput is nil") + } + if strings.TrimSpace(string(input.Status)) == "" { + return nil, errors.New("Fetch policy status is empty") + } + if strings.TrimSpace(input.Agency) == "" { + return nil, errors.New("Fetch policy agency is empty") + } + output = &BaseModel{} + err = obsClient.doActionWithBucketAndKey("SetBucketFetchPolicy", HTTP_PUT, input.Bucket, string(objectKeyExtensionPolicy), input, output, extensions) + if err != nil { + output = nil + } + return +} + +// GetBucketFetchPolicy gets the bucket fetch policy. +// +// You can use this API to obtain the fetch policy of a bucket. +func (obsClient ObsClient) GetBucketFetchPolicy(input *GetBucketFetchPolicyInput, extensions ...extensionOptions) (output *GetBucketFetchPolicyOutput, err error) { + if input == nil { + return nil, errors.New("GetBucketFetchPolicyInput is nil") + } + output = &GetBucketFetchPolicyOutput{} + err = obsClient.doActionWithBucketAndKeyV2("GetBucketFetchPolicy", HTTP_GET, input.Bucket, string(objectKeyExtensionPolicy), input, output, extensions) + if err != nil { + output = nil + } + return +} + +// DeleteBucketFetchPolicy deletes the bucket fetch policy. +// +// You can use this API to delete the fetch policy of a bucket. +func (obsClient ObsClient) DeleteBucketFetchPolicy(input *DeleteBucketFetchPolicyInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("DeleteBucketFetchPolicyInput is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucketAndKey("DeleteBucketFetchPolicy", HTTP_DELETE, input.Bucket, string(objectKeyExtensionPolicy), input, output, extensions) + if err != nil { + output = nil + } + return +} + +// SetBucketFetchJob sets the bucket fetch job. +// +// You can use this API to set a bucket fetch job. +func (obsClient ObsClient) SetBucketFetchJob(input *SetBucketFetchJobInput, extensions ...extensionOptions) (output *SetBucketFetchJobOutput, err error) { + if input == nil { + return nil, errors.New("SetBucketFetchJobInput is nil") + } + if strings.TrimSpace(input.URL) == "" { + return nil, errors.New("URL is empty") + } + output = &SetBucketFetchJobOutput{} + err = obsClient.doActionWithBucketAndKeyV2("SetBucketFetchJob", HTTP_POST, input.Bucket, string(objectKeyAsyncFetchJob), input, output, extensions) + if err != nil { + output = nil + } + return +} + +// GetBucketFetchJob gets the bucket fetch job. +// +// You can use this API to obtain the fetch job of a bucket. 
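+//
+// An illustrative sketch (names are placeholders; jobID is assumed to have been
+// returned by an earlier SetBucketFetchJob call):
+//
+//	input := &obs.GetBucketFetchJobInput{}
+//	input.Bucket = "my-bucket"
+//	input.JobID = jobID
+//	output, err := client.GetBucketFetchJob(input)
+//	if err == nil {
+//		fmt.Printf("request id: %s\n", output.RequestId)
+//	}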
+func (obsClient ObsClient) GetBucketFetchJob(input *GetBucketFetchJobInput, extensions ...extensionOptions) (output *GetBucketFetchJobOutput, err error) {
+	if input == nil {
+		return nil, errors.New("GetBucketFetchJobInput is nil")
+	}
+	if strings.TrimSpace(input.JobID) == "" {
+		return nil, errors.New("JobID is empty")
+	}
+	output = &GetBucketFetchJobOutput{}
+	err = obsClient.doActionWithBucketAndKeyV2("GetBucketFetchJob", HTTP_GET, input.Bucket, string(objectKeyAsyncFetchJob)+"/"+input.JobID, input, output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
diff --git a/modules/obs/conf.go b/modules/obs/conf.go
new file mode 100755
index 000000000..4b8525bfb
--- /dev/null
+++ b/modules/obs/conf.go
@@ -0,0 +1,471 @@
+// Copyright 2019 Huawei Technologies Co.,Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+//nolint:golint, unused
+package obs
+
+import (
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"net"
+	"net/http"
+	"net/url"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+)
+
+type securityProvider struct {
+	ak            string
+	sk            string
+	securityToken string
+}
+
+type urlHolder struct {
+	scheme string
+	host   string
+	port   int
+}
+
+type config struct {
+	securityProvider *securityProvider
+	urlHolder        *urlHolder
+	pathStyle        bool
+	cname            bool
+	sslVerify        bool
+	endpoint         string
+	signature        SignatureType
+	region           string
+	connectTimeout   int
+	socketTimeout    int
+	headerTimeout    int
+	idleConnTimeout  int
+	finalTimeout     int
+	maxRetryCount    int
+	proxyURL         string
+	maxConnsPerHost  int
+	pemCerts         []byte
+	transport        *http.Transport
+	ctx              context.Context
+	maxRedirectCount int
+}
+
+func (conf config) String() string {
+	return fmt.Sprintf("[endpoint:%s, signature:%s, pathStyle:%v, region:%s"+
+		"\nconnectTimeout:%d, socketTimeout:%d, headerTimeout:%d, idleConnTimeout:%d"+
+		"\nmaxRetryCount:%d, maxConnsPerHost:%d, sslVerify:%v, maxRedirectCount:%d]",
+		conf.endpoint, conf.signature, conf.pathStyle, conf.region,
+		conf.connectTimeout, conf.socketTimeout, conf.headerTimeout, conf.idleConnTimeout,
+		conf.maxRetryCount, conf.maxConnsPerHost, conf.sslVerify, conf.maxRedirectCount,
+	)
+}
+
+type configurer func(conf *config)
+
+// WithSslVerify is a wrapper for WithSslVerifyAndPemCerts.
+func WithSslVerify(sslVerify bool) configurer {
+	return WithSslVerifyAndPemCerts(sslVerify, nil)
+}
+
+// WithSslVerifyAndPemCerts is a configurer for ObsClient to set conf.sslVerify and conf.pemCerts.
+func WithSslVerifyAndPemCerts(sslVerify bool, pemCerts []byte) configurer {
+	return func(conf *config) {
+		conf.sslVerify = sslVerify
+		conf.pemCerts = pemCerts
+	}
+}
+
+// WithHeaderTimeout is a configurer for ObsClient to set the timeout period of obtaining the response headers.
+func WithHeaderTimeout(headerTimeout int) configurer {
+	return func(conf *config) {
+		conf.headerTimeout = headerTimeout
+	}
+}
+
+// WithProxyUrl is a configurer for ObsClient to set HTTP proxy.
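+//
+// An illustrative sketch of applying configurers when building a client
+// (endpoint, credentials and proxy address are placeholders; New is assumed to
+// be this package's client constructor):
+//
+//	client, err := obs.New(ak, sk, "https://obs.example.com",
+//		obs.WithProxyUrl("http://127.0.0.1:8080"),
+//		obs.WithConnectTimeout(30),
+//	)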
+func WithProxyUrl(proxyURL string) configurer { + return func(conf *config) { + conf.proxyURL = proxyURL + } +} + +// WithMaxConnections is a configurer for ObsClient to set the maximum number of idle HTTP connections. +func WithMaxConnections(maxConnsPerHost int) configurer { + return func(conf *config) { + conf.maxConnsPerHost = maxConnsPerHost + } +} + +// WithPathStyle is a configurer for ObsClient. +func WithPathStyle(pathStyle bool) configurer { + return func(conf *config) { + conf.pathStyle = pathStyle + } +} + +// WithSignature is a configurer for ObsClient. +func WithSignature(signature SignatureType) configurer { + return func(conf *config) { + conf.signature = signature + } +} + +// WithRegion is a configurer for ObsClient. +func WithRegion(region string) configurer { + return func(conf *config) { + conf.region = region + } +} + +// WithConnectTimeout is a configurer for ObsClient to set timeout period for establishing +// an http/https connection, in seconds. +func WithConnectTimeout(connectTimeout int) configurer { + return func(conf *config) { + conf.connectTimeout = connectTimeout + } +} + +// WithSocketTimeout is a configurer for ObsClient to set the timeout duration for transmitting data at +// the socket layer, in seconds. +func WithSocketTimeout(socketTimeout int) configurer { + return func(conf *config) { + conf.socketTimeout = socketTimeout + } +} + +// WithIdleConnTimeout is a configurer for ObsClient to set the timeout period of an idle HTTP connection +// in the connection pool, in seconds. +func WithIdleConnTimeout(idleConnTimeout int) configurer { + return func(conf *config) { + conf.idleConnTimeout = idleConnTimeout + } +} + +// WithMaxRetryCount is a configurer for ObsClient to set the maximum number of retries when an HTTP/HTTPS connection is abnormal. +func WithMaxRetryCount(maxRetryCount int) configurer { + return func(conf *config) { + conf.maxRetryCount = maxRetryCount + } +} + +// WithSecurityToken is a configurer for ObsClient to set the security token in the temporary access keys. +func WithSecurityToken(securityToken string) configurer { + return func(conf *config) { + conf.securityProvider.securityToken = securityToken + } +} + +// WithHttpTransport is a configurer for ObsClient to set the customized http Transport. +func WithHttpTransport(transport *http.Transport) configurer { + return func(conf *config) { + conf.transport = transport + } +} + +// WithRequestContext is a configurer for ObsClient to set the context for each HTTP request. +func WithRequestContext(ctx context.Context) configurer { + return func(conf *config) { + conf.ctx = ctx + } +} + +// WithCustomDomainName is a configurer for ObsClient. +func WithCustomDomainName(cname bool) configurer { + return func(conf *config) { + conf.cname = cname + } +} + +// WithMaxRedirectCount is a configurer for ObsClient to set the maximum number of times that the request is redirected. 
+func WithMaxRedirectCount(maxRedirectCount int) configurer { + return func(conf *config) { + conf.maxRedirectCount = maxRedirectCount + } +} + +func (conf *config) prepareConfig() { + if conf.connectTimeout <= 0 { + conf.connectTimeout = DEFAULT_CONNECT_TIMEOUT + } + + if conf.socketTimeout <= 0 { + conf.socketTimeout = DEFAULT_SOCKET_TIMEOUT + } + + conf.finalTimeout = conf.socketTimeout * 10 + + if conf.headerTimeout <= 0 { + conf.headerTimeout = DEFAULT_HEADER_TIMEOUT + } + + if conf.idleConnTimeout < 0 { + conf.idleConnTimeout = DEFAULT_IDLE_CONN_TIMEOUT + } + + if conf.maxRetryCount < 0 { + conf.maxRetryCount = DEFAULT_MAX_RETRY_COUNT + } + + if conf.maxConnsPerHost <= 0 { + conf.maxConnsPerHost = DEFAULT_MAX_CONN_PER_HOST + } + + if conf.maxRedirectCount < 0 { + conf.maxRedirectCount = DEFAULT_MAX_REDIRECT_COUNT + } +} + +func (conf *config) initConfigWithDefault() error { + conf.securityProvider.ak = strings.TrimSpace(conf.securityProvider.ak) + conf.securityProvider.sk = strings.TrimSpace(conf.securityProvider.sk) + conf.securityProvider.securityToken = strings.TrimSpace(conf.securityProvider.securityToken) + conf.endpoint = strings.TrimSpace(conf.endpoint) + if conf.endpoint == "" { + return errors.New("endpoint is not set") + } + + if index := strings.Index(conf.endpoint, "?"); index > 0 { + conf.endpoint = conf.endpoint[:index] + } + + for strings.LastIndex(conf.endpoint, "/") == len(conf.endpoint)-1 { + conf.endpoint = conf.endpoint[:len(conf.endpoint)-1] + } + + if conf.signature == "" { + conf.signature = DEFAULT_SIGNATURE + } + + urlHolder := &urlHolder{} + var address string + if strings.HasPrefix(conf.endpoint, "https://") { + urlHolder.scheme = "https" + address = conf.endpoint[len("https://"):] + } else if strings.HasPrefix(conf.endpoint, "http://") { + urlHolder.scheme = "http" + address = conf.endpoint[len("http://"):] + } else { + urlHolder.scheme = "https" + address = conf.endpoint + } + + addr := strings.Split(address, ":") + if len(addr) == 2 { + if port, err := strconv.Atoi(addr[1]); err == nil { + urlHolder.port = port + } + } + urlHolder.host = addr[0] + if urlHolder.port == 0 { + if urlHolder.scheme == "https" { + urlHolder.port = 443 + } else { + urlHolder.port = 80 + } + } + + if IsIP(urlHolder.host) { + conf.pathStyle = true + } + + conf.urlHolder = urlHolder + + conf.region = strings.TrimSpace(conf.region) + if conf.region == "" { + conf.region = DEFAULT_REGION + } + + conf.prepareConfig() + conf.proxyURL = strings.TrimSpace(conf.proxyURL) + return nil +} + +func (conf *config) getTransport() error { + if conf.transport == nil { + conf.transport = &http.Transport{ + Dial: func(network, addr string) (net.Conn, error) { + conn, err := net.DialTimeout(network, addr, time.Second*time.Duration(conf.connectTimeout)) + if err != nil { + return nil, err + } + return getConnDelegate(conn, conf.socketTimeout, conf.finalTimeout), nil + }, + MaxIdleConns: conf.maxConnsPerHost, + MaxIdleConnsPerHost: conf.maxConnsPerHost, + ResponseHeaderTimeout: time.Second * time.Duration(conf.headerTimeout), + IdleConnTimeout: time.Second * time.Duration(conf.idleConnTimeout), + } + + if conf.proxyURL != "" { + proxyURL, err := url.Parse(conf.proxyURL) + if err != nil { + return err + } + conf.transport.Proxy = http.ProxyURL(proxyURL) + } + + tlsConfig := &tls.Config{InsecureSkipVerify: !conf.sslVerify} + if conf.sslVerify && conf.pemCerts != nil { + pool := x509.NewCertPool() + pool.AppendCertsFromPEM(conf.pemCerts) + tlsConfig.RootCAs = pool + } + + conf.transport.TLSClientConfig 
= tlsConfig + conf.transport.DisableCompression = true + } + + return nil +} + +func checkRedirectFunc(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse +} + +// DummyQueryEscape return the input string. +func DummyQueryEscape(s string) string { + return s +} + +func (conf *config) prepareBaseURL(bucketName string) (requestURL string, canonicalizedURL string) { + urlHolder := conf.urlHolder + if conf.cname { + requestURL = fmt.Sprintf("%s://%s:%d", urlHolder.scheme, urlHolder.host, urlHolder.port) + if conf.signature == "v4" { + canonicalizedURL = "/" + } else { + canonicalizedURL = "/" + urlHolder.host + "/" + } + } else { + if bucketName == "" { + requestURL = fmt.Sprintf("%s://%s:%d", urlHolder.scheme, urlHolder.host, urlHolder.port) + canonicalizedURL = "/" + } else { + if conf.pathStyle { + requestURL = fmt.Sprintf("%s://%s:%d/%s", urlHolder.scheme, urlHolder.host, urlHolder.port, bucketName) + canonicalizedURL = "/" + bucketName + } else { + requestURL = fmt.Sprintf("%s://%s.%s:%d", urlHolder.scheme, bucketName, urlHolder.host, urlHolder.port) + if conf.signature == "v2" || conf.signature == "OBS" { + canonicalizedURL = "/" + bucketName + "/" + } else { + canonicalizedURL = "/" + } + } + } + } + return +} + +func (conf *config) prepareObjectKey(escape bool, objectKey string, escapeFunc func(s string) string) (encodeObjectKey string) { + if escape { + tempKey := []rune(objectKey) + result := make([]string, 0, len(tempKey)) + for _, value := range tempKey { + if string(value) == "/" { + result = append(result, string(value)) + } else { + if string(value) == " " { + result = append(result, url.PathEscape(string(value))) + } else { + result = append(result, url.QueryEscape(string(value))) + } + } + } + encodeObjectKey = strings.Join(result, "") + } else { + encodeObjectKey = escapeFunc(objectKey) + } + return +} + +func (conf *config) prepareEscapeFunc(escape bool) (escapeFunc func(s string) string) { + if escape { + return url.QueryEscape + } + return DummyQueryEscape +} + +func (conf *config) formatUrls(bucketName, objectKey string, params map[string]string, escape bool) (requestURL string, canonicalizedURL string) { + + requestURL, canonicalizedURL = conf.prepareBaseURL(bucketName) + var escapeFunc func(s string) string + escapeFunc = conf.prepareEscapeFunc(escape) + + if objectKey != "" { + var encodeObjectKey string + encodeObjectKey = conf.prepareObjectKey(escape, objectKey, escapeFunc) + requestURL += "/" + encodeObjectKey + if !strings.HasSuffix(canonicalizedURL, "/") { + canonicalizedURL += "/" + } + canonicalizedURL += encodeObjectKey + } + + keys := make([]string, 0, len(params)) + for key := range params { + keys = append(keys, strings.TrimSpace(key)) + } + sort.Strings(keys) + i := 0 + + for index, key := range keys { + if index == 0 { + requestURL += "?" + } else { + requestURL += "&" + } + _key := url.QueryEscape(key) + requestURL += _key + + _value := params[key] + if conf.signature == "v4" { + requestURL += "=" + url.QueryEscape(_value) + } else { + if _value != "" { + requestURL += "=" + url.QueryEscape(_value) + _value = "=" + _value + } else { + _value = "" + } + lowerKey := strings.ToLower(key) + _, ok := allowedResourceParameterNames[lowerKey] + prefixHeader := HEADER_PREFIX + isObs := conf.signature == SignatureObs + if isObs { + prefixHeader = HEADER_PREFIX_OBS + } + ok = ok || strings.HasPrefix(lowerKey, prefixHeader) + if ok { + if i == 0 { + canonicalizedURL += "?" 
+ } else { + canonicalizedURL += "&" + } + canonicalizedURL += getQueryURL(_key, _value) + i++ + } + } + } + return +} + +func getQueryURL(key, value string) string { + queryURL := "" + queryURL += key + queryURL += value + return queryURL +} diff --git a/modules/obs/const.go b/modules/obs/const.go new file mode 100755 index 000000000..89f1e08eb --- /dev/null +++ b/modules/obs/const.go @@ -0,0 +1,932 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +//nolint:golint, unused +package obs + +const ( + obsSdkVersion = "3.20.9" + USER_AGENT = "obs-sdk-go/" + obsSdkVersion + HEADER_PREFIX = "x-amz-" + HEADER_PREFIX_META = "x-amz-meta-" + HEADER_PREFIX_OBS = "x-obs-" + HEADER_PREFIX_META_OBS = "x-obs-meta-" + HEADER_DATE_AMZ = "x-amz-date" + HEADER_DATE_OBS = "x-obs-date" + HEADER_STS_TOKEN_AMZ = "x-amz-security-token" + HEADER_STS_TOKEN_OBS = "x-obs-security-token" + HEADER_ACCESSS_KEY_AMZ = "AWSAccessKeyId" + PREFIX_META = "meta-" + + HEADER_CONTENT_SHA256_AMZ = "x-amz-content-sha256" + HEADER_ACL_AMZ = "x-amz-acl" + HEADER_ACL_OBS = "x-obs-acl" + HEADER_ACL = "acl" + HEADER_LOCATION_AMZ = "location" + HEADER_BUCKET_LOCATION_OBS = "bucket-location" + HEADER_COPY_SOURCE = "copy-source" + HEADER_COPY_SOURCE_RANGE = "copy-source-range" + HEADER_RANGE = "Range" + HEADER_STORAGE_CLASS = "x-default-storage-class" + HEADER_STORAGE_CLASS_OBS = "x-obs-storage-class" + HEADER_VERSION_OBS = "version" + HEADER_GRANT_READ_OBS = "grant-read" + HEADER_GRANT_WRITE_OBS = "grant-write" + HEADER_GRANT_READ_ACP_OBS = "grant-read-acp" + HEADER_GRANT_WRITE_ACP_OBS = "grant-write-acp" + HEADER_GRANT_FULL_CONTROL_OBS = "grant-full-control" + HEADER_GRANT_READ_DELIVERED_OBS = "grant-read-delivered" + HEADER_GRANT_FULL_CONTROL_DELIVERED_OBS = "grant-full-control-delivered" + HEADER_REQUEST_ID = "request-id" + HEADER_BUCKET_REGION = "bucket-region" + HEADER_ACCESS_CONRTOL_ALLOW_ORIGIN = "access-control-allow-origin" + HEADER_ACCESS_CONRTOL_ALLOW_HEADERS = "access-control-allow-headers" + HEADER_ACCESS_CONRTOL_MAX_AGE = "access-control-max-age" + HEADER_ACCESS_CONRTOL_ALLOW_METHODS = "access-control-allow-methods" + HEADER_ACCESS_CONRTOL_EXPOSE_HEADERS = "access-control-expose-headers" + HEADER_EPID_HEADERS = "epid" + HEADER_VERSION_ID = "version-id" + HEADER_COPY_SOURCE_VERSION_ID = "copy-source-version-id" + HEADER_DELETE_MARKER = "delete-marker" + HEADER_WEBSITE_REDIRECT_LOCATION = "website-redirect-location" + HEADER_METADATA_DIRECTIVE = "metadata-directive" + HEADER_EXPIRATION = "expiration" + HEADER_EXPIRES_OBS = "x-obs-expires" + HEADER_RESTORE = "restore" + HEADER_OBJECT_TYPE = "object-type" + HEADER_NEXT_APPEND_POSITION = "next-append-position" + HEADER_STORAGE_CLASS2 = "storage-class" + HEADER_CONTENT_LENGTH = "content-length" + HEADER_CONTENT_TYPE = "content-type" + HEADER_CONTENT_LANGUAGE = "content-language" + HEADER_EXPIRES = "expires" + HEADER_CACHE_CONTROL = "cache-control" + HEADER_CONTENT_DISPOSITION = "content-disposition" + HEADER_CONTENT_ENCODING = 
"content-encoding" + HEADER_AZ_REDUNDANCY = "az-redundancy" + headerOefMarker = "oef-marker" + + HEADER_ETAG = "etag" + HEADER_LASTMODIFIED = "last-modified" + + HEADER_COPY_SOURCE_IF_MATCH = "copy-source-if-match" + HEADER_COPY_SOURCE_IF_NONE_MATCH = "copy-source-if-none-match" + HEADER_COPY_SOURCE_IF_MODIFIED_SINCE = "copy-source-if-modified-since" + HEADER_COPY_SOURCE_IF_UNMODIFIED_SINCE = "copy-source-if-unmodified-since" + + HEADER_IF_MATCH = "If-Match" + HEADER_IF_NONE_MATCH = "If-None-Match" + HEADER_IF_MODIFIED_SINCE = "If-Modified-Since" + HEADER_IF_UNMODIFIED_SINCE = "If-Unmodified-Since" + + HEADER_SSEC_ENCRYPTION = "server-side-encryption-customer-algorithm" + HEADER_SSEC_KEY = "server-side-encryption-customer-key" + HEADER_SSEC_KEY_MD5 = "server-side-encryption-customer-key-MD5" + + HEADER_SSEKMS_ENCRYPTION = "server-side-encryption" + HEADER_SSEKMS_KEY = "server-side-encryption-aws-kms-key-id" + HEADER_SSEKMS_ENCRYPT_KEY_OBS = "server-side-encryption-kms-key-id" + + HEADER_SSEC_COPY_SOURCE_ENCRYPTION = "copy-source-server-side-encryption-customer-algorithm" + HEADER_SSEC_COPY_SOURCE_KEY = "copy-source-server-side-encryption-customer-key" + HEADER_SSEC_COPY_SOURCE_KEY_MD5 = "copy-source-server-side-encryption-customer-key-MD5" + + HEADER_SSEKMS_KEY_AMZ = "x-amz-server-side-encryption-aws-kms-key-id" + + HEADER_SSEKMS_KEY_OBS = "x-obs-server-side-encryption-kms-key-id" + + HEADER_SUCCESS_ACTION_REDIRECT = "success_action_redirect" + + HEADER_DATE_CAMEL = "Date" + HEADER_HOST_CAMEL = "Host" + HEADER_HOST = "host" + HEADER_AUTH_CAMEL = "Authorization" + HEADER_MD5_CAMEL = "Content-MD5" + HEADER_LOCATION_CAMEL = "Location" + HEADER_CONTENT_LENGTH_CAMEL = "Content-Length" + HEADER_CONTENT_TYPE_CAML = "Content-Type" + HEADER_USER_AGENT_CAMEL = "User-Agent" + HEADER_ORIGIN_CAMEL = "Origin" + HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL = "Access-Control-Request-Headers" + HEADER_CACHE_CONTROL_CAMEL = "Cache-Control" + HEADER_CONTENT_DISPOSITION_CAMEL = "Content-Disposition" + HEADER_CONTENT_ENCODING_CAMEL = "Content-Encoding" + HEADER_CONTENT_LANGUAGE_CAMEL = "Content-Language" + HEADER_EXPIRES_CAMEL = "Expires" + + PARAM_VERSION_ID = "versionId" + PARAM_RESPONSE_CONTENT_TYPE = "response-content-type" + PARAM_RESPONSE_CONTENT_LANGUAGE = "response-content-language" + PARAM_RESPONSE_EXPIRES = "response-expires" + PARAM_RESPONSE_CACHE_CONTROL = "response-cache-control" + PARAM_RESPONSE_CONTENT_DISPOSITION = "response-content-disposition" + PARAM_RESPONSE_CONTENT_ENCODING = "response-content-encoding" + PARAM_IMAGE_PROCESS = "x-image-process" + + PARAM_ALGORITHM_AMZ_CAMEL = "X-Amz-Algorithm" + PARAM_CREDENTIAL_AMZ_CAMEL = "X-Amz-Credential" + PARAM_DATE_AMZ_CAMEL = "X-Amz-Date" + PARAM_DATE_OBS_CAMEL = "X-Obs-Date" + PARAM_EXPIRES_AMZ_CAMEL = "X-Amz-Expires" + PARAM_SIGNEDHEADERS_AMZ_CAMEL = "X-Amz-SignedHeaders" + PARAM_SIGNATURE_AMZ_CAMEL = "X-Amz-Signature" + + DEFAULT_SIGNATURE = SignatureV2 + DEFAULT_REGION = "region" + DEFAULT_CONNECT_TIMEOUT = 60 + DEFAULT_SOCKET_TIMEOUT = 60 + DEFAULT_HEADER_TIMEOUT = 60 + DEFAULT_IDLE_CONN_TIMEOUT = 30 + DEFAULT_MAX_RETRY_COUNT = 3 + DEFAULT_MAX_REDIRECT_COUNT = 3 + DEFAULT_MAX_CONN_PER_HOST = 1000 + EMPTY_CONTENT_SHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + UNSIGNED_PAYLOAD = "UNSIGNED-PAYLOAD" + LONG_DATE_FORMAT = "20060102T150405Z" + SHORT_DATE_FORMAT = "20060102" + ISO8601_DATE_FORMAT = "2006-01-02T15:04:05Z" + ISO8601_MIDNIGHT_DATE_FORMAT = "2006-01-02T00:00:00Z" + RFC1123_FORMAT = "Mon, 02 Jan 2006 15:04:05 
GMT" + + V4_SERVICE_NAME = "s3" + V4_SERVICE_SUFFIX = "aws4_request" + + V2_HASH_PREFIX = "AWS" + OBS_HASH_PREFIX = "OBS" + + V4_HASH_PREFIX = "AWS4-HMAC-SHA256" + V4_HASH_PRE = "AWS4" + + DEFAULT_SSE_KMS_ENCRYPTION = "aws:kms" + DEFAULT_SSE_KMS_ENCRYPTION_OBS = "kms" + + DEFAULT_SSE_C_ENCRYPTION = "AES256" + + HTTP_GET = "GET" + HTTP_POST = "POST" + HTTP_PUT = "PUT" + HTTP_DELETE = "DELETE" + HTTP_HEAD = "HEAD" + HTTP_OPTIONS = "OPTIONS" + + REQUEST_PAYER = "request-payer" + MULTI_AZ = "3az" + + MAX_PART_SIZE = 5 * 1024 * 1024 * 1024 + MIN_PART_SIZE = 100 * 1024 + DEFAULT_PART_SIZE = 9 * 1024 * 1024 + MAX_PART_NUM = 10000 +) + +// SignatureType defines type of signature +type SignatureType string + +const ( + // SignatureV2 signature type v2 + SignatureV2 SignatureType = "v2" + // SignatureV4 signature type v4 + SignatureV4 SignatureType = "v4" + // SignatureObs signature type OBS + SignatureObs SignatureType = "OBS" +) + +var ( + interestedHeaders = []string{"content-md5", "content-type", "date"} + + allowedRequestHTTPHeaderMetadataNames = map[string]bool{ + "content-type": true, + "content-md5": true, + "content-length": true, + "content-language": true, + "expires": true, + "origin": true, + "cache-control": true, + "content-disposition": true, + "content-encoding": true, + "access-control-request-method": true, + "access-control-request-headers": true, + "x-default-storage-class": true, + "location": true, + "date": true, + "etag": true, + "range": true, + "host": true, + "if-modified-since": true, + "if-unmodified-since": true, + "if-match": true, + "if-none-match": true, + "last-modified": true, + "content-range": true, + } + + allowedResourceParameterNames = map[string]bool{ + "acl": true, + "backtosource": true, + "metadata": true, + "policy": true, + "torrent": true, + "logging": true, + "location": true, + "storageinfo": true, + "quota": true, + "storageclass": true, + "storagepolicy": true, + "requestpayment": true, + "versions": true, + "versioning": true, + "versionid": true, + "uploads": true, + "uploadid": true, + "partnumber": true, + "website": true, + "notification": true, + "lifecycle": true, + "deletebucket": true, + "delete": true, + "cors": true, + "restore": true, + "tagging": true, + "append": true, + "position": true, + "replication": true, + "response-content-type": true, + "response-content-language": true, + "response-expires": true, + "response-cache-control": true, + "response-content-disposition": true, + "response-content-encoding": true, + "x-image-process": true, + "x-oss-process": true, + "x-image-save-bucket": true, + "x-image-save-object": true, + "ignore-sign-in-query": true, + } + + mimeTypes = map[string]string{ + "001": "application/x-001", + "301": "application/x-301", + "323": "text/h323", + "7z": "application/x-7z-compressed", + "906": "application/x-906", + "907": "drawing/907", + "IVF": "video/x-ivf", + "a11": "application/x-a11", + "aac": "audio/x-aac", + "acp": "audio/x-mei-aac", + "ai": "application/postscript", + "aif": "audio/aiff", + "aifc": "audio/aiff", + "aiff": "audio/aiff", + "anv": "application/x-anv", + "apk": "application/vnd.android.package-archive", + "asa": "text/asa", + "asf": "video/x-ms-asf", + "asp": "text/asp", + "asx": "video/x-ms-asf", + "atom": "application/atom+xml", + "au": "audio/basic", + "avi": "video/avi", + "awf": "application/vnd.adobe.workflow", + "biz": "text/xml", + "bmp": "application/x-bmp", + "bot": "application/x-bot", + "bz2": "application/x-bzip2", + "c4t": "application/x-c4t", + "c90": 
"application/x-c90", + "cal": "application/x-cals", + "cat": "application/vnd.ms-pki.seccat", + "cdf": "application/x-netcdf", + "cdr": "application/x-cdr", + "cel": "application/x-cel", + "cer": "application/x-x509-ca-cert", + "cg4": "application/x-g4", + "cgm": "application/x-cgm", + "cit": "application/x-cit", + "class": "java/*", + "cml": "text/xml", + "cmp": "application/x-cmp", + "cmx": "application/x-cmx", + "cot": "application/x-cot", + "crl": "application/pkix-crl", + "crt": "application/x-x509-ca-cert", + "csi": "application/x-csi", + "css": "text/css", + "csv": "text/csv", + "cu": "application/cu-seeme", + "cut": "application/x-cut", + "dbf": "application/x-dbf", + "dbm": "application/x-dbm", + "dbx": "application/x-dbx", + "dcd": "text/xml", + "dcx": "application/x-dcx", + "deb": "application/x-debian-package", + "der": "application/x-x509-ca-cert", + "dgn": "application/x-dgn", + "dib": "application/x-dib", + "dll": "application/x-msdownload", + "doc": "application/msword", + "docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", + "dot": "application/msword", + "drw": "application/x-drw", + "dtd": "text/xml", + "dvi": "application/x-dvi", + "dwf": "application/x-dwf", + "dwg": "application/x-dwg", + "dxb": "application/x-dxb", + "dxf": "application/x-dxf", + "edn": "application/vnd.adobe.edn", + "emf": "application/x-emf", + "eml": "message/rfc822", + "ent": "text/xml", + "eot": "application/vnd.ms-fontobject", + "epi": "application/x-epi", + "eps": "application/postscript", + "epub": "application/epub+zip", + "etd": "application/x-ebx", + "etx": "text/x-setext", + "exe": "application/x-msdownload", + "fax": "image/fax", + "fdf": "application/vnd.fdf", + "fif": "application/fractals", + "flac": "audio/flac", + "flv": "video/x-flv", + "fo": "text/xml", + "frm": "application/x-frm", + "g4": "application/x-g4", + "gbr": "application/x-gbr", + "gif": "image/gif", + "gl2": "application/x-gl2", + "gp4": "application/x-gp4", + "gz": "application/gzip", + "hgl": "application/x-hgl", + "hmr": "application/x-hmr", + "hpg": "application/x-hpgl", + "hpl": "application/x-hpl", + "hqx": "application/mac-binhex40", + "hrf": "application/x-hrf", + "hta": "application/hta", + "htc": "text/x-component", + "htm": "text/html", + "html": "text/html", + "htt": "text/webviewhtml", + "htx": "text/html", + "icb": "application/x-icb", + "ico": "application/x-ico", + "ics": "text/calendar", + "iff": "application/x-iff", + "ig4": "application/x-g4", + "igs": "application/x-igs", + "iii": "application/x-iphone", + "img": "application/x-img", + "ini": "text/plain", + "ins": "application/x-internet-signup", + "ipa": "application/vnd.iphone", + "iso": "application/x-iso9660-image", + "isp": "application/x-internet-signup", + "jar": "application/java-archive", + "java": "java/*", + "jfif": "image/jpeg", + "jpe": "image/jpeg", + "jpeg": "image/jpeg", + "jpg": "image/jpeg", + "js": "application/x-javascript", + "json": "application/json", + "jsp": "text/html", + "la1": "audio/x-liquid-file", + "lar": "application/x-laplayer-reg", + "latex": "application/x-latex", + "lavs": "audio/x-liquid-secure", + "lbm": "application/x-lbm", + "lmsff": "audio/x-la-lms", + "log": "text/plain", + "ls": "application/x-javascript", + "ltr": "application/x-ltr", + "m1v": "video/x-mpeg", + "m2v": "video/x-mpeg", + "m3u": "audio/mpegurl", + "m4a": "audio/mp4", + "m4e": "video/mpeg4", + "m4v": "video/mp4", + "mac": "application/x-mac", + "man": "application/x-troff-man", + "math": "text/xml", + "mdb": 
"application/msaccess", + "mfp": "application/x-shockwave-flash", + "mht": "message/rfc822", + "mhtml": "message/rfc822", + "mi": "application/x-mi", + "mid": "audio/mid", + "midi": "audio/mid", + "mil": "application/x-mil", + "mml": "text/xml", + "mnd": "audio/x-musicnet-download", + "mns": "audio/x-musicnet-stream", + "mocha": "application/x-javascript", + "mov": "video/quicktime", + "movie": "video/x-sgi-movie", + "mp1": "audio/mp1", + "mp2": "audio/mp2", + "mp2v": "video/mpeg", + "mp3": "audio/mp3", + "mp4": "video/mp4", + "mp4a": "audio/mp4", + "mp4v": "video/mp4", + "mpa": "video/x-mpg", + "mpd": "application/vnd.ms-project", + "mpe": "video/mpeg", + "mpeg": "video/mpeg", + "mpg": "video/mpeg", + "mpg4": "video/mp4", + "mpga": "audio/rn-mpeg", + "mpp": "application/vnd.ms-project", + "mps": "video/x-mpeg", + "mpt": "application/vnd.ms-project", + "mpv": "video/mpg", + "mpv2": "video/mpeg", + "mpw": "application/vnd.ms-project", + "mpx": "application/vnd.ms-project", + "mtx": "text/xml", + "mxp": "application/x-mmxp", + "net": "image/pnetvue", + "nrf": "application/x-nrf", + "nws": "message/rfc822", + "odc": "text/x-ms-odc", + "oga": "audio/ogg", + "ogg": "audio/ogg", + "ogv": "video/ogg", + "ogx": "application/ogg", + "out": "application/x-out", + "p10": "application/pkcs10", + "p12": "application/x-pkcs12", + "p7b": "application/x-pkcs7-certificates", + "p7c": "application/pkcs7-mime", + "p7m": "application/pkcs7-mime", + "p7r": "application/x-pkcs7-certreqresp", + "p7s": "application/pkcs7-signature", + "pbm": "image/x-portable-bitmap", + "pc5": "application/x-pc5", + "pci": "application/x-pci", + "pcl": "application/x-pcl", + "pcx": "application/x-pcx", + "pdf": "application/pdf", + "pdx": "application/vnd.adobe.pdx", + "pfx": "application/x-pkcs12", + "pgl": "application/x-pgl", + "pgm": "image/x-portable-graymap", + "pic": "application/x-pic", + "pko": "application/vnd.ms-pki.pko", + "pl": "application/x-perl", + "plg": "text/html", + "pls": "audio/scpls", + "plt": "application/x-plt", + "png": "image/png", + "pnm": "image/x-portable-anymap", + "pot": "application/vnd.ms-powerpoint", + "ppa": "application/vnd.ms-powerpoint", + "ppm": "application/x-ppm", + "pps": "application/vnd.ms-powerpoint", + "ppt": "application/vnd.ms-powerpoint", + "pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation", + "pr": "application/x-pr", + "prf": "application/pics-rules", + "prn": "application/x-prn", + "prt": "application/x-prt", + "ps": "application/postscript", + "ptn": "application/x-ptn", + "pwz": "application/vnd.ms-powerpoint", + "qt": "video/quicktime", + "r3t": "text/vnd.rn-realtext3d", + "ra": "audio/vnd.rn-realaudio", + "ram": "audio/x-pn-realaudio", + "rar": "application/x-rar-compressed", + "ras": "application/x-ras", + "rat": "application/rat-file", + "rdf": "text/xml", + "rec": "application/vnd.rn-recording", + "red": "application/x-red", + "rgb": "application/x-rgb", + "rjs": "application/vnd.rn-realsystem-rjs", + "rjt": "application/vnd.rn-realsystem-rjt", + "rlc": "application/x-rlc", + "rle": "application/x-rle", + "rm": "application/vnd.rn-realmedia", + "rmf": "application/vnd.adobe.rmf", + "rmi": "audio/mid", + "rmj": "application/vnd.rn-realsystem-rmj", + "rmm": "audio/x-pn-realaudio", + "rmp": "application/vnd.rn-rn_music_package", + "rms": "application/vnd.rn-realmedia-secure", + "rmvb": "application/vnd.rn-realmedia-vbr", + "rmx": "application/vnd.rn-realsystem-rmx", + "rnx": "application/vnd.rn-realplayer", + "rp": "image/vnd.rn-realpix", + 
"rpm": "audio/x-pn-realaudio-plugin", + "rsml": "application/vnd.rn-rsml", + "rss": "application/rss+xml", + "rt": "text/vnd.rn-realtext", + "rtf": "application/x-rtf", + "rv": "video/vnd.rn-realvideo", + "sam": "application/x-sam", + "sat": "application/x-sat", + "sdp": "application/sdp", + "sdw": "application/x-sdw", + "sgm": "text/sgml", + "sgml": "text/sgml", + "sis": "application/vnd.symbian.install", + "sisx": "application/vnd.symbian.install", + "sit": "application/x-stuffit", + "slb": "application/x-slb", + "sld": "application/x-sld", + "slk": "drawing/x-slk", + "smi": "application/smil", + "smil": "application/smil", + "smk": "application/x-smk", + "snd": "audio/basic", + "sol": "text/plain", + "sor": "text/plain", + "spc": "application/x-pkcs7-certificates", + "spl": "application/futuresplash", + "spp": "text/xml", + "ssm": "application/streamingmedia", + "sst": "application/vnd.ms-pki.certstore", + "stl": "application/vnd.ms-pki.stl", + "stm": "text/html", + "sty": "application/x-sty", + "svg": "image/svg+xml", + "swf": "application/x-shockwave-flash", + "tar": "application/x-tar", + "tdf": "application/x-tdf", + "tg4": "application/x-tg4", + "tga": "application/x-tga", + "tif": "image/tiff", + "tiff": "image/tiff", + "tld": "text/xml", + "top": "drawing/x-top", + "torrent": "application/x-bittorrent", + "tsd": "text/xml", + "ttf": "application/x-font-ttf", + "txt": "text/plain", + "uin": "application/x-icq", + "uls": "text/iuls", + "vcf": "text/x-vcard", + "vda": "application/x-vda", + "vdx": "application/vnd.visio", + "vml": "text/xml", + "vpg": "application/x-vpeg005", + "vsd": "application/vnd.visio", + "vss": "application/vnd.visio", + "vst": "application/x-vst", + "vsw": "application/vnd.visio", + "vsx": "application/vnd.visio", + "vtx": "application/vnd.visio", + "vxml": "text/xml", + "wav": "audio/wav", + "wax": "audio/x-ms-wax", + "wb1": "application/x-wb1", + "wb2": "application/x-wb2", + "wb3": "application/x-wb3", + "wbmp": "image/vnd.wap.wbmp", + "webm": "video/webm", + "wiz": "application/msword", + "wk3": "application/x-wk3", + "wk4": "application/x-wk4", + "wkq": "application/x-wkq", + "wks": "application/x-wks", + "wm": "video/x-ms-wm", + "wma": "audio/x-ms-wma", + "wmd": "application/x-ms-wmd", + "wmf": "application/x-wmf", + "wml": "text/vnd.wap.wml", + "wmv": "video/x-ms-wmv", + "wmx": "video/x-ms-wmx", + "wmz": "application/x-ms-wmz", + "woff": "application/x-font-woff", + "wp6": "application/x-wp6", + "wpd": "application/x-wpd", + "wpg": "application/x-wpg", + "wpl": "application/vnd.ms-wpl", + "wq1": "application/x-wq1", + "wr1": "application/x-wr1", + "wri": "application/x-wri", + "wrk": "application/x-wrk", + "ws": "application/x-ws", + "ws2": "application/x-ws", + "wsc": "text/scriptlet", + "wsdl": "text/xml", + "wvx": "video/x-ms-wvx", + "x_b": "application/x-x_b", + "x_t": "application/x-x_t", + "xap": "application/x-silverlight-app", + "xbm": "image/x-xbitmap", + "xdp": "application/vnd.adobe.xdp", + "xdr": "text/xml", + "xfd": "application/vnd.adobe.xfd", + "xfdf": "application/vnd.adobe.xfdf", + "xhtml": "text/html", + "xls": "application/vnd.ms-excel", + "xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", + "xlw": "application/x-xlw", + "xml": "text/xml", + "xpl": "audio/scpls", + "xpm": "image/x-xpixmap", + "xq": "text/xml", + "xql": "text/xml", + "xquery": "text/xml", + "xsd": "text/xml", + "xsl": "text/xml", + "xslt": "text/xml", + "xwd": "application/x-xwd", + "yaml": "text/yaml", + "yml": "text/yaml", + "zip": 
"application/zip", + } +) + +// HttpMethodType defines http method type +type HttpMethodType string + +const ( + HttpMethodGet HttpMethodType = HTTP_GET + HttpMethodPut HttpMethodType = HTTP_PUT + HttpMethodPost HttpMethodType = HTTP_POST + HttpMethodDelete HttpMethodType = HTTP_DELETE + HttpMethodHead HttpMethodType = HTTP_HEAD + HttpMethodOptions HttpMethodType = HTTP_OPTIONS +) + +// SubResourceType defines the subResource value +type SubResourceType string + +const ( + // SubResourceStoragePolicy subResource value: storagePolicy + SubResourceStoragePolicy SubResourceType = "storagePolicy" + + // SubResourceStorageClass subResource value: storageClass + SubResourceStorageClass SubResourceType = "storageClass" + + // SubResourceQuota subResource value: quota + SubResourceQuota SubResourceType = "quota" + + // SubResourceStorageInfo subResource value: storageinfo + SubResourceStorageInfo SubResourceType = "storageinfo" + + // SubResourceLocation subResource value: location + SubResourceLocation SubResourceType = "location" + + // SubResourceAcl subResource value: acl + SubResourceAcl SubResourceType = "acl" + + // SubResourcePolicy subResource value: policy + SubResourcePolicy SubResourceType = "policy" + + // SubResourceCors subResource value: cors + SubResourceCors SubResourceType = "cors" + + // SubResourceVersioning subResource value: versioning + SubResourceVersioning SubResourceType = "versioning" + + // SubResourceWebsite subResource value: website + SubResourceWebsite SubResourceType = "website" + + // SubResourceLogging subResource value: logging + SubResourceLogging SubResourceType = "logging" + + // SubResourceLifecycle subResource value: lifecycle + SubResourceLifecycle SubResourceType = "lifecycle" + + // SubResourceNotification subResource value: notification + SubResourceNotification SubResourceType = "notification" + + // SubResourceTagging subResource value: tagging + SubResourceTagging SubResourceType = "tagging" + + // SubResourceDelete subResource value: delete + SubResourceDelete SubResourceType = "delete" + + // SubResourceVersions subResource value: versions + SubResourceVersions SubResourceType = "versions" + + // SubResourceUploads subResource value: uploads + SubResourceUploads SubResourceType = "uploads" + + // SubResourceRestore subResource value: restore + SubResourceRestore SubResourceType = "restore" + + // SubResourceMetadata subResource value: metadata + SubResourceMetadata SubResourceType = "metadata" + + // SubResourceRequestPayment subResource value: requestPayment + SubResourceRequestPayment SubResourceType = "requestPayment" +) + +// objectKeyType defines the objectKey value +type objectKeyType string + +const ( + // objectKeyExtensionPolicy objectKey value: v1/extension_policy + objectKeyExtensionPolicy objectKeyType = "v1/extension_policy" + + // objectKeyAsyncFetchJob objectKey value: v1/async-fetch/jobs + objectKeyAsyncFetchJob objectKeyType = "v1/async-fetch/jobs" +) + +// AclType defines bucket/object acl type +type AclType string + +const ( + AclPrivate AclType = "private" + AclPublicRead AclType = "public-read" + AclPublicReadWrite AclType = "public-read-write" + AclAuthenticatedRead AclType = "authenticated-read" + AclBucketOwnerRead AclType = "bucket-owner-read" + AclBucketOwnerFullControl AclType = "bucket-owner-full-control" + AclLogDeliveryWrite AclType = "log-delivery-write" + AclPublicReadDelivery AclType = "public-read-delivered" + AclPublicReadWriteDelivery AclType = "public-read-write-delivered" +) + +// StorageClassType defines 
bucket storage class +type StorageClassType string + +const ( + //StorageClassStandard storage class: STANDARD + StorageClassStandard StorageClassType = "STANDARD" + + //StorageClassWarm storage class: WARM + StorageClassWarm StorageClassType = "WARM" + + //StorageClassCold storage class: COLD + StorageClassCold StorageClassType = "COLD" + + storageClassStandardIA StorageClassType = "STANDARD_IA" + storageClassGlacier StorageClassType = "GLACIER" +) + +// PermissionType defines permission type +type PermissionType string + +const ( + // PermissionRead permission type: READ + PermissionRead PermissionType = "READ" + + // PermissionWrite permission type: WRITE + PermissionWrite PermissionType = "WRITE" + + // PermissionReadAcp permission type: READ_ACP + PermissionReadAcp PermissionType = "READ_ACP" + + // PermissionWriteAcp permission type: WRITE_ACP + PermissionWriteAcp PermissionType = "WRITE_ACP" + + // PermissionFullControl permission type: FULL_CONTROL + PermissionFullControl PermissionType = "FULL_CONTROL" +) + +// GranteeType defines grantee type +type GranteeType string + +const ( + // GranteeGroup grantee type: Group + GranteeGroup GranteeType = "Group" + + // GranteeUser grantee type: CanonicalUser + GranteeUser GranteeType = "CanonicalUser" +) + +// GroupUriType defines grantee uri type +type GroupUriType string + +const ( + // GroupAllUsers grantee uri type: AllUsers + GroupAllUsers GroupUriType = "AllUsers" + + // GroupAuthenticatedUsers grantee uri type: AuthenticatedUsers + GroupAuthenticatedUsers GroupUriType = "AuthenticatedUsers" + + // GroupLogDelivery grantee uri type: LogDelivery + GroupLogDelivery GroupUriType = "LogDelivery" +) + +// VersioningStatusType defines bucket version status +type VersioningStatusType string + +const ( + // VersioningStatusEnabled version status: Enabled + VersioningStatusEnabled VersioningStatusType = "Enabled" + + // VersioningStatusSuspended version status: Suspended + VersioningStatusSuspended VersioningStatusType = "Suspended" +) + +// ProtocolType defines protocol type +type ProtocolType string + +const ( + // ProtocolHttp prorocol type: http + ProtocolHttp ProtocolType = "http" + + // ProtocolHttps prorocol type: https + ProtocolHttps ProtocolType = "https" +) + +// RuleStatusType defines lifeCycle rule status +type RuleStatusType string + +const ( + // RuleStatusEnabled rule status: Enabled + RuleStatusEnabled RuleStatusType = "Enabled" + + // RuleStatusDisabled rule status: Disabled + RuleStatusDisabled RuleStatusType = "Disabled" +) + +// RestoreTierType defines restore options +type RestoreTierType string + +const ( + // RestoreTierExpedited restore options: Expedited + RestoreTierExpedited RestoreTierType = "Expedited" + + // RestoreTierStandard restore options: Standard + RestoreTierStandard RestoreTierType = "Standard" + + // RestoreTierBulk restore options: Bulk + RestoreTierBulk RestoreTierType = "Bulk" +) + +// MetadataDirectiveType defines metadata operation indicator +type MetadataDirectiveType string + +const ( + // CopyMetadata metadata operation: COPY + CopyMetadata MetadataDirectiveType = "COPY" + + // ReplaceNew metadata operation: REPLACE_NEW + ReplaceNew MetadataDirectiveType = "REPLACE_NEW" + + // ReplaceMetadata metadata operation: REPLACE + ReplaceMetadata MetadataDirectiveType = "REPLACE" +) + +// EventType defines bucket notification type of events +type EventType string + +const ( + // ObjectCreatedAll type of events: ObjectCreated:* + ObjectCreatedAll EventType = "ObjectCreated:*" + + // ObjectCreatedPut type 
of events: ObjectCreated:Put + ObjectCreatedPut EventType = "ObjectCreated:Put" + + // ObjectCreatedPost type of events: ObjectCreated:Post + ObjectCreatedPost EventType = "ObjectCreated:Post" + + // ObjectCreatedCopy type of events: ObjectCreated:Copy + ObjectCreatedCopy EventType = "ObjectCreated:Copy" + + // ObjectCreatedCompleteMultipartUpload type of events: ObjectCreated:CompleteMultipartUpload + ObjectCreatedCompleteMultipartUpload EventType = "ObjectCreated:CompleteMultipartUpload" + + // ObjectRemovedAll type of events: ObjectRemoved:* + ObjectRemovedAll EventType = "ObjectRemoved:*" + + // ObjectRemovedDelete type of events: ObjectRemoved:Delete + ObjectRemovedDelete EventType = "ObjectRemoved:Delete" + + // ObjectRemovedDeleteMarkerCreated type of events: ObjectRemoved:DeleteMarkerCreated + ObjectRemovedDeleteMarkerCreated EventType = "ObjectRemoved:DeleteMarkerCreated" +) + +// PayerType defines type of payer +type PayerType string + +const ( + // BucketOwnerPayer type of payer: BucketOwner + BucketOwnerPayer PayerType = "BucketOwner" + + // RequesterPayer type of payer: Requester + RequesterPayer PayerType = "Requester" + + // Requester header for requester-Pays + Requester PayerType = "requester" +) + +// FetchPolicyStatusType defines type of fetch policy status +type FetchPolicyStatusType string + +const ( + // FetchStatusOpen type of status: open + FetchStatusOpen FetchPolicyStatusType = "open" + + // FetchStatusClosed type of status: closed + FetchStatusClosed FetchPolicyStatusType = "closed" +) diff --git a/modules/obs/convert.go b/modules/obs/convert.go new file mode 100755 index 000000000..bd859556b --- /dev/null +++ b/modules/obs/convert.go @@ -0,0 +1,880 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
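A note on the constants file just added: modules/obs/const.go pins the vendored SDK at obsSdkVersion "3.20.9", sets DEFAULT_SIGNATURE to SignatureV2 (with SignatureV4 and SignatureObs as the alternatives), and ships a static mimeTypes extension-to-Content-Type table instead of relying on the host OS mime registry. A minimal sketch of how such a table is typically consulted when choosing a Content-Type for an upload; the guessContentType helper and the trimmed map below are illustrative only and are not part of this diff:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// Trimmed copy of the mimeTypes table vendored in modules/obs/const.go;
// the real map carries several hundred extension -> Content-Type entries.
var mimeTypes = map[string]string{
	"json": "application/json",
	"txt":  "text/plain",
	"zip":  "application/zip",
}

// guessContentType (hypothetical helper): look up the lower-cased file
// extension and fall back to a generic binary type when it is unknown.
func guessContentType(name string) string {
	ext := strings.TrimPrefix(strings.ToLower(filepath.Ext(name)), ".")
	if ct, ok := mimeTypes[ext]; ok {
		return ct
	}
	return "application/octet-stream"
}

func main() {
	fmt.Println(guessContentType("dataset.zip"))  // application/zip
	fmt.Println(guessContentType("weights.ckpt")) // application/octet-stream
}
```

Bundling the table keeps Content-Type resolution deterministic across platforms, which matters because content-type is one of the interestedHeaders that participates in v2 request signing.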
+ +//nolint:golint, unused +package obs + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strings" + "time" +) + +func cleanHeaderPrefix(header http.Header) map[string][]string { + responseHeaders := make(map[string][]string) + for key, value := range header { + if len(value) > 0 { + key = strings.ToLower(key) + if strings.HasPrefix(key, HEADER_PREFIX) || strings.HasPrefix(key, HEADER_PREFIX_OBS) { + key = key[len(HEADER_PREFIX):] + } + responseHeaders[key] = value + } + } + return responseHeaders +} + +// ParseStringToEventType converts string value to EventType value and returns it +func ParseStringToEventType(value string) (ret EventType) { + switch value { + case "ObjectCreated:*", "s3:ObjectCreated:*": + ret = ObjectCreatedAll + case "ObjectCreated:Put", "s3:ObjectCreated:Put": + ret = ObjectCreatedPut + case "ObjectCreated:Post", "s3:ObjectCreated:Post": + ret = ObjectCreatedPost + case "ObjectCreated:Copy", "s3:ObjectCreated:Copy": + ret = ObjectCreatedCopy + case "ObjectCreated:CompleteMultipartUpload", "s3:ObjectCreated:CompleteMultipartUpload": + ret = ObjectCreatedCompleteMultipartUpload + case "ObjectRemoved:*", "s3:ObjectRemoved:*": + ret = ObjectRemovedAll + case "ObjectRemoved:Delete", "s3:ObjectRemoved:Delete": + ret = ObjectRemovedDelete + case "ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRemoved:DeleteMarkerCreated": + ret = ObjectRemovedDeleteMarkerCreated + default: + ret = "" + } + return +} + +// ParseStringToStorageClassType converts string value to StorageClassType value and returns it +func ParseStringToStorageClassType(value string) (ret StorageClassType) { + switch value { + case "STANDARD": + ret = StorageClassStandard + case "STANDARD_IA", "WARM": + ret = StorageClassWarm + case "GLACIER", "COLD": + ret = StorageClassCold + default: + ret = "" + } + return +} + +func prepareGrantURI(grant Grant) string { + if grant.Grantee.URI == GroupAllUsers || grant.Grantee.URI == GroupAuthenticatedUsers { + return fmt.Sprintf("%s%s", "http://acs.amazonaws.com/groups/global/", grant.Grantee.URI) + } + if grant.Grantee.URI == GroupLogDelivery { + return fmt.Sprintf("%s%s", "http://acs.amazonaws.com/groups/s3/", grant.Grantee.URI) + } + return fmt.Sprintf("%s", grant.Grantee.URI) +} + +func convertGrantToXML(grant Grant, isObs bool, isBucket bool) string { + xml := make([]string, 0, 4) + + if grant.Grantee.Type == GranteeUser { + if isObs { + xml = append(xml, "") + } else { + xml = append(xml, fmt.Sprintf("", grant.Grantee.Type)) + } + if grant.Grantee.ID != "" { + granteeID := XmlTranscoding(grant.Grantee.ID) + xml = append(xml, fmt.Sprintf("%s", granteeID)) + } + if !isObs && grant.Grantee.DisplayName != "" { + granteeDisplayName := XmlTranscoding(grant.Grantee.DisplayName) + xml = append(xml, fmt.Sprintf("%s", granteeDisplayName)) + } + xml = append(xml, "") + } else { + if !isObs { + xml = append(xml, fmt.Sprintf("", grant.Grantee.Type)) + xml = append(xml, prepareGrantURI(grant)) + xml = append(xml, "") + } else if grant.Grantee.URI == GroupAllUsers { + xml = append(xml, "") + xml = append(xml, fmt.Sprintf("Everyone")) + xml = append(xml, "") + } else { + return strings.Join(xml, "") + } + } + + xml = append(xml, fmt.Sprintf("%s", grant.Permission)) + if isObs && isBucket { + xml = append(xml, fmt.Sprintf("%t", grant.Delivered)) + } + xml = append(xml, fmt.Sprintf("")) + return strings.Join(xml, "") +} + +func hasLoggingTarget(input BucketLoggingStatus) bool { + if input.TargetBucket != "" || input.TargetPrefix != "" || 
len(input.TargetGrants) > 0 { + return true + } + return false +} + +// ConvertLoggingStatusToXml converts BucketLoggingStatus value to XML data and returns it +func ConvertLoggingStatusToXml(input BucketLoggingStatus, returnMd5 bool, isObs bool) (data string, md5 string) { + grantsLength := len(input.TargetGrants) + xml := make([]string, 0, 8+grantsLength) + + xml = append(xml, "") + if isObs && input.Agency != "" { + agency := XmlTranscoding(input.Agency) + xml = append(xml, fmt.Sprintf("%s", agency)) + } + if hasLoggingTarget(input) { + xml = append(xml, "") + if input.TargetBucket != "" { + xml = append(xml, fmt.Sprintf("%s", input.TargetBucket)) + } + if input.TargetPrefix != "" { + targetPrefix := XmlTranscoding(input.TargetPrefix) + xml = append(xml, fmt.Sprintf("%s", targetPrefix)) + } + if grantsLength > 0 { + xml = append(xml, "") + for _, grant := range input.TargetGrants { + xml = append(xml, convertGrantToXML(grant, isObs, false)) + } + xml = append(xml, "") + } + + xml = append(xml, "") + } + xml = append(xml, "") + data = strings.Join(xml, "") + if returnMd5 { + md5 = Base64Md5([]byte(data)) + } + return +} + +// ConvertAclToXml converts AccessControlPolicy value to XML data and returns it +func ConvertAclToXml(input AccessControlPolicy, returnMd5 bool, isObs bool) (data string, md5 string) { + xml := make([]string, 0, 4+len(input.Grants)) + ownerID := XmlTranscoding(input.Owner.ID) + xml = append(xml, fmt.Sprintf("%s", ownerID)) + if !isObs && input.Owner.DisplayName != "" { + ownerDisplayName := XmlTranscoding(input.Owner.DisplayName) + xml = append(xml, fmt.Sprintf("%s", ownerDisplayName)) + } + if isObs && input.Delivered != "" { + objectDelivered := XmlTranscoding(input.Delivered) + xml = append(xml, fmt.Sprintf("%s", objectDelivered)) + } else { + xml = append(xml, "") + } + for _, grant := range input.Grants { + xml = append(xml, convertGrantToXML(grant, isObs, false)) + } + xml = append(xml, "") + data = strings.Join(xml, "") + if returnMd5 { + md5 = Base64Md5([]byte(data)) + } + return +} + +func convertBucketACLToXML(input AccessControlPolicy, returnMd5 bool, isObs bool) (data string, md5 string) { + xml := make([]string, 0, 4+len(input.Grants)) + ownerID := XmlTranscoding(input.Owner.ID) + xml = append(xml, fmt.Sprintf("%s", ownerID)) + if !isObs && input.Owner.DisplayName != "" { + ownerDisplayName := XmlTranscoding(input.Owner.DisplayName) + xml = append(xml, fmt.Sprintf("%s", ownerDisplayName)) + } + + xml = append(xml, "") + + for _, grant := range input.Grants { + xml = append(xml, convertGrantToXML(grant, isObs, true)) + } + xml = append(xml, "") + data = strings.Join(xml, "") + if returnMd5 { + md5 = Base64Md5([]byte(data)) + } + return +} + +func convertConditionToXML(condition Condition) string { + xml := make([]string, 0, 2) + if condition.KeyPrefixEquals != "" { + keyPrefixEquals := XmlTranscoding(condition.KeyPrefixEquals) + xml = append(xml, fmt.Sprintf("%s", keyPrefixEquals)) + } + if condition.HttpErrorCodeReturnedEquals != "" { + xml = append(xml, fmt.Sprintf("%s", condition.HttpErrorCodeReturnedEquals)) + } + if len(xml) > 0 { + return fmt.Sprintf("%s", strings.Join(xml, "")) + } + return "" +} + +func prepareRoutingRule(input BucketWebsiteConfiguration) string { + xml := make([]string, 0, len(input.RoutingRules)*10) + for _, routingRule := range input.RoutingRules { + xml = append(xml, "") + xml = append(xml, "") + if routingRule.Redirect.Protocol != "" { + xml = append(xml, fmt.Sprintf("%s", routingRule.Redirect.Protocol)) + } + if 
routingRule.Redirect.HostName != "" { + xml = append(xml, fmt.Sprintf("%s", routingRule.Redirect.HostName)) + } + if routingRule.Redirect.ReplaceKeyPrefixWith != "" { + replaceKeyPrefixWith := XmlTranscoding(routingRule.Redirect.ReplaceKeyPrefixWith) + xml = append(xml, fmt.Sprintf("%s", replaceKeyPrefixWith)) + } + + if routingRule.Redirect.ReplaceKeyWith != "" { + replaceKeyWith := XmlTranscoding(routingRule.Redirect.ReplaceKeyWith) + xml = append(xml, fmt.Sprintf("%s", replaceKeyWith)) + } + if routingRule.Redirect.HttpRedirectCode != "" { + xml = append(xml, fmt.Sprintf("%s", routingRule.Redirect.HttpRedirectCode)) + } + xml = append(xml, "") + + if ret := convertConditionToXML(routingRule.Condition); ret != "" { + xml = append(xml, ret) + } + xml = append(xml, "") + } + return strings.Join(xml, "") +} + +// ConvertWebsiteConfigurationToXml converts BucketWebsiteConfiguration value to XML data and returns it +func ConvertWebsiteConfigurationToXml(input BucketWebsiteConfiguration, returnMd5 bool) (data string, md5 string) { + routingRuleLength := len(input.RoutingRules) + xml := make([]string, 0, 6+routingRuleLength*10) + xml = append(xml, "") + + if input.RedirectAllRequestsTo.HostName != "" { + xml = append(xml, fmt.Sprintf("%s", input.RedirectAllRequestsTo.HostName)) + if input.RedirectAllRequestsTo.Protocol != "" { + xml = append(xml, fmt.Sprintf("%s", input.RedirectAllRequestsTo.Protocol)) + } + xml = append(xml, "") + } else { + if input.IndexDocument.Suffix != "" { + indexDocumentSuffix := XmlTranscoding(input.IndexDocument.Suffix) + xml = append(xml, fmt.Sprintf("%s", indexDocumentSuffix)) + } + if input.ErrorDocument.Key != "" { + errorDocumentKey := XmlTranscoding(input.ErrorDocument.Key) + xml = append(xml, fmt.Sprintf("%s", errorDocumentKey)) + } + if routingRuleLength > 0 { + xml = append(xml, "") + xml = append(xml, prepareRoutingRule(input)) + xml = append(xml, "") + } + } + + xml = append(xml, "") + data = strings.Join(xml, "") + if returnMd5 { + md5 = Base64Md5([]byte(data)) + } + return +} + +func convertTransitionsToXML(transitions []Transition, isObs bool) string { + if length := len(transitions); length > 0 { + xml := make([]string, 0, length) + for _, transition := range transitions { + var temp string + if transition.Days > 0 { + temp = fmt.Sprintf("%d", transition.Days) + } else if !transition.Date.IsZero() { + temp = fmt.Sprintf("%s", transition.Date.UTC().Format(ISO8601_MIDNIGHT_DATE_FORMAT)) + } + if temp != "" { + if !isObs { + storageClass := string(transition.StorageClass) + if transition.StorageClass == StorageClassWarm { + storageClass = string(storageClassStandardIA) + } else if transition.StorageClass == StorageClassCold { + storageClass = string(storageClassGlacier) + } + xml = append(xml, fmt.Sprintf("%s%s", temp, storageClass)) + } else { + xml = append(xml, fmt.Sprintf("%s%s", temp, transition.StorageClass)) + } + } + } + return strings.Join(xml, "") + } + return "" +} + +func convertExpirationToXML(expiration Expiration) string { + if expiration.Days > 0 { + return fmt.Sprintf("%d", expiration.Days) + } else if !expiration.Date.IsZero() { + return fmt.Sprintf("%s", expiration.Date.UTC().Format(ISO8601_MIDNIGHT_DATE_FORMAT)) + } + return "" +} +func convertNoncurrentVersionTransitionsToXML(noncurrentVersionTransitions []NoncurrentVersionTransition, isObs bool) string { + if length := len(noncurrentVersionTransitions); length > 0 { + xml := make([]string, 0, length) + for _, noncurrentVersionTransition := range noncurrentVersionTransitions { + if 
noncurrentVersionTransition.NoncurrentDays > 0 { + storageClass := string(noncurrentVersionTransition.StorageClass) + if !isObs { + if storageClass == string(StorageClassWarm) { + storageClass = string(storageClassStandardIA) + } else if storageClass == string(StorageClassCold) { + storageClass = string(storageClassGlacier) + } + } + xml = append(xml, fmt.Sprintf("%d"+ + "%s", + noncurrentVersionTransition.NoncurrentDays, storageClass)) + } + } + return strings.Join(xml, "") + } + return "" +} +func convertNoncurrentVersionExpirationToXML(noncurrentVersionExpiration NoncurrentVersionExpiration) string { + if noncurrentVersionExpiration.NoncurrentDays > 0 { + return fmt.Sprintf("%d", noncurrentVersionExpiration.NoncurrentDays) + } + return "" +} + +// ConvertLifecyleConfigurationToXml converts BucketLifecyleConfiguration value to XML data and returns it +func ConvertLifecyleConfigurationToXml(input BucketLifecyleConfiguration, returnMd5 bool, isObs bool) (data string, md5 string) { + xml := make([]string, 0, 2+len(input.LifecycleRules)*9) + xml = append(xml, "") + for _, lifecyleRule := range input.LifecycleRules { + xml = append(xml, "") + if lifecyleRule.ID != "" { + lifecyleRuleID := XmlTranscoding(lifecyleRule.ID) + xml = append(xml, fmt.Sprintf("%s", lifecyleRuleID)) + } + lifecyleRulePrefix := XmlTranscoding(lifecyleRule.Prefix) + xml = append(xml, fmt.Sprintf("%s", lifecyleRulePrefix)) + xml = append(xml, fmt.Sprintf("%s", lifecyleRule.Status)) + if ret := convertTransitionsToXML(lifecyleRule.Transitions, isObs); ret != "" { + xml = append(xml, ret) + } + if ret := convertExpirationToXML(lifecyleRule.Expiration); ret != "" { + xml = append(xml, ret) + } + if ret := convertNoncurrentVersionTransitionsToXML(lifecyleRule.NoncurrentVersionTransitions, isObs); ret != "" { + xml = append(xml, ret) + } + if ret := convertNoncurrentVersionExpirationToXML(lifecyleRule.NoncurrentVersionExpiration); ret != "" { + xml = append(xml, ret) + } + xml = append(xml, "") + } + xml = append(xml, "") + data = strings.Join(xml, "") + if returnMd5 { + md5 = Base64Md5([]byte(data)) + } + return +} + +func converntFilterRulesToXML(filterRules []FilterRule, isObs bool) string { + if length := len(filterRules); length > 0 { + xml := make([]string, 0, length*4) + for _, filterRule := range filterRules { + xml = append(xml, "") + if filterRule.Name != "" { + filterRuleName := XmlTranscoding(filterRule.Name) + xml = append(xml, fmt.Sprintf("%s", filterRuleName)) + } + if filterRule.Value != "" { + filterRuleValue := XmlTranscoding(filterRule.Value) + xml = append(xml, fmt.Sprintf("%s", filterRuleValue)) + } + xml = append(xml, "") + } + if !isObs { + return fmt.Sprintf("%s", strings.Join(xml, "")) + } + return fmt.Sprintf("%s", strings.Join(xml, "")) + } + return "" +} + +func converntEventsToXML(events []EventType, isObs bool) string { + if length := len(events); length > 0 { + xml := make([]string, 0, length) + if !isObs { + for _, event := range events { + xml = append(xml, fmt.Sprintf("%s%s", "s3:", event)) + } + } else { + for _, event := range events { + xml = append(xml, fmt.Sprintf("%s", event)) + } + } + return strings.Join(xml, "") + } + return "" +} + +func converntConfigureToXML(topicConfiguration TopicConfiguration, xmlElem string, isObs bool) string { + xml := make([]string, 0, 6) + xml = append(xml, xmlElem) + if topicConfiguration.ID != "" { + topicConfigurationID := XmlTranscoding(topicConfiguration.ID) + xml = append(xml, fmt.Sprintf("%s", topicConfigurationID)) + } + topicConfigurationTopic := 
XmlTranscoding(topicConfiguration.Topic) + xml = append(xml, fmt.Sprintf("%s", topicConfigurationTopic)) + + if ret := converntEventsToXML(topicConfiguration.Events, isObs); ret != "" { + xml = append(xml, ret) + } + if ret := converntFilterRulesToXML(topicConfiguration.FilterRules, isObs); ret != "" { + xml = append(xml, ret) + } + tempElem := xmlElem[0:1] + "/" + xmlElem[1:] + xml = append(xml, tempElem) + return strings.Join(xml, "") +} + +// ConverntObsRestoreToXml converts RestoreObjectInput value to XML data and returns it +func ConverntObsRestoreToXml(restoreObjectInput RestoreObjectInput) string { + xml := make([]string, 0, 2) + xml = append(xml, fmt.Sprintf("%d", restoreObjectInput.Days)) + if restoreObjectInput.Tier != "Bulk" { + xml = append(xml, fmt.Sprintf("%s", restoreObjectInput.Tier)) + } + xml = append(xml, fmt.Sprintf("")) + data := strings.Join(xml, "") + return data +} + +// ConvertNotificationToXml converts BucketNotification value to XML data and returns it +func ConvertNotificationToXml(input BucketNotification, returnMd5 bool, isObs bool) (data string, md5 string) { + xml := make([]string, 0, 2+len(input.TopicConfigurations)*6) + xml = append(xml, "") + for _, topicConfiguration := range input.TopicConfigurations { + ret := converntConfigureToXML(topicConfiguration, "", isObs) + xml = append(xml, ret) + } + xml = append(xml, "") + data = strings.Join(xml, "") + if returnMd5 { + md5 = Base64Md5([]byte(data)) + } + return +} + +// ConvertCompleteMultipartUploadInputToXml converts CompleteMultipartUploadInput value to XML data and returns it +func ConvertCompleteMultipartUploadInputToXml(input CompleteMultipartUploadInput, returnMd5 bool) (data string, md5 string) { + xml := make([]string, 0, 2+len(input.Parts)*4) + xml = append(xml, "") + for _, part := range input.Parts { + xml = append(xml, "") + xml = append(xml, fmt.Sprintf("%d", part.PartNumber)) + xml = append(xml, fmt.Sprintf("%s", part.ETag)) + xml = append(xml, "") + } + xml = append(xml, "") + data = strings.Join(xml, "") + if returnMd5 { + md5 = Base64Md5([]byte(data)) + } + return +} + +func parseSseHeader(responseHeaders map[string][]string) (sseHeader ISseHeader) { + if ret, ok := responseHeaders[HEADER_SSEC_ENCRYPTION]; ok { + sseCHeader := SseCHeader{Encryption: ret[0]} + if ret, ok = responseHeaders[HEADER_SSEC_KEY_MD5]; ok { + sseCHeader.KeyMD5 = ret[0] + } + sseHeader = sseCHeader + } else if ret, ok := responseHeaders[HEADER_SSEKMS_ENCRYPTION]; ok { + sseKmsHeader := SseKmsHeader{Encryption: ret[0]} + if ret, ok = responseHeaders[HEADER_SSEKMS_KEY]; ok { + sseKmsHeader.Key = ret[0] + } else if ret, ok = responseHeaders[HEADER_SSEKMS_ENCRYPT_KEY_OBS]; ok { + sseKmsHeader.Key = ret[0] + } + sseHeader = sseKmsHeader + } + return +} + +func parseCorsHeader(output BaseModel) (AllowOrigin, AllowHeader, AllowMethod, ExposeHeader string, MaxAgeSeconds int) { + if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_ALLOW_ORIGIN]; ok { + AllowOrigin = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_ALLOW_HEADERS]; ok { + AllowHeader = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_MAX_AGE]; ok { + MaxAgeSeconds = StringToInt(ret[0], 0) + } + if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_ALLOW_METHODS]; ok { + AllowMethod = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_EXPOSE_HEADERS]; ok { + ExposeHeader = ret[0] + } + return +} + +func parseUnCommonHeader(output *GetObjectMetadataOutput) { + if ret, ok := 
output.ResponseHeaders[HEADER_VERSION_ID]; ok { + output.VersionId = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_WEBSITE_REDIRECT_LOCATION]; ok { + output.WebsiteRedirectLocation = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_EXPIRATION]; ok { + output.Expiration = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_RESTORE]; ok { + output.Restore = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_OBJECT_TYPE]; ok { + output.ObjectType = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_NEXT_APPEND_POSITION]; ok { + output.NextAppendPosition = ret[0] + } +} + +// ParseGetObjectMetadataOutput sets GetObjectMetadataOutput field values with response headers +func ParseGetObjectMetadataOutput(output *GetObjectMetadataOutput) { + output.AllowOrigin, output.AllowHeader, output.AllowMethod, output.ExposeHeader, output.MaxAgeSeconds = parseCorsHeader(output.BaseModel) + parseUnCommonHeader(output) + if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS2]; ok { + output.StorageClass = ParseStringToStorageClassType(ret[0]) + } + if ret, ok := output.ResponseHeaders[HEADER_ETAG]; ok { + output.ETag = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_CONTENT_TYPE]; ok { + output.ContentType = ret[0] + } + + output.SseHeader = parseSseHeader(output.ResponseHeaders) + if ret, ok := output.ResponseHeaders[HEADER_LASTMODIFIED]; ok { + ret, err := time.Parse(time.RFC1123, ret[0]) + if err == nil { + output.LastModified = ret + } + } + if ret, ok := output.ResponseHeaders[HEADER_CONTENT_LENGTH]; ok { + output.ContentLength = StringToInt64(ret[0], 0) + } + + output.Metadata = make(map[string]string) + + for key, value := range output.ResponseHeaders { + if strings.HasPrefix(key, PREFIX_META) { + _key := key[len(PREFIX_META):] + output.ResponseHeaders[_key] = value + output.Metadata[_key] = value[0] + delete(output.ResponseHeaders, key) + } + } + +} + +// ParseCopyObjectOutput sets CopyObjectOutput field values with response headers +func ParseCopyObjectOutput(output *CopyObjectOutput) { + if ret, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok { + output.VersionId = ret[0] + } + output.SseHeader = parseSseHeader(output.ResponseHeaders) + if ret, ok := output.ResponseHeaders[HEADER_COPY_SOURCE_VERSION_ID]; ok { + output.CopySourceVersionId = ret[0] + } +} + +// ParsePutObjectOutput sets PutObjectOutput field values with response headers +func ParsePutObjectOutput(output *PutObjectOutput) { + if ret, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok { + output.VersionId = ret[0] + } + output.SseHeader = parseSseHeader(output.ResponseHeaders) + if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS2]; ok { + output.StorageClass = ParseStringToStorageClassType(ret[0]) + } + if ret, ok := output.ResponseHeaders[HEADER_ETAG]; ok { + output.ETag = ret[0] + } +} + +// ParseInitiateMultipartUploadOutput sets InitiateMultipartUploadOutput field values with response headers +func ParseInitiateMultipartUploadOutput(output *InitiateMultipartUploadOutput) { + output.SseHeader = parseSseHeader(output.ResponseHeaders) +} + +// ParseUploadPartOutput sets UploadPartOutput field values with response headers +func ParseUploadPartOutput(output *UploadPartOutput) { + output.SseHeader = parseSseHeader(output.ResponseHeaders) + if ret, ok := output.ResponseHeaders[HEADER_ETAG]; ok { + output.ETag = ret[0] + } +} + +// ParseCompleteMultipartUploadOutput sets CompleteMultipartUploadOutput field values with response headers +func ParseCompleteMultipartUploadOutput(output 
*CompleteMultipartUploadOutput) { + output.SseHeader = parseSseHeader(output.ResponseHeaders) + if ret, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok { + output.VersionId = ret[0] + } +} + +// ParseCopyPartOutput sets CopyPartOutput field values with response headers +func ParseCopyPartOutput(output *CopyPartOutput) { + output.SseHeader = parseSseHeader(output.ResponseHeaders) +} + +// ParseGetBucketMetadataOutput sets GetBucketMetadataOutput field values with response headers +func ParseGetBucketMetadataOutput(output *GetBucketMetadataOutput) { + output.AllowOrigin, output.AllowHeader, output.AllowMethod, output.ExposeHeader, output.MaxAgeSeconds = parseCorsHeader(output.BaseModel) + if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS]; ok { + output.StorageClass = ParseStringToStorageClassType(ret[0]) + } else if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS2]; ok { + output.StorageClass = ParseStringToStorageClassType(ret[0]) + } + if ret, ok := output.ResponseHeaders[HEADER_VERSION_OBS]; ok { + output.Version = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok { + output.Location = ret[0] + } else if ret, ok := output.ResponseHeaders[HEADER_BUCKET_LOCATION_OBS]; ok { + output.Location = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_EPID_HEADERS]; ok { + output.Epid = ret[0] + } +} + +func parseContentHeader(output *SetObjectMetadataOutput) { + if ret, ok := output.ResponseHeaders[HEADER_CONTENT_DISPOSITION]; ok { + output.ContentDisposition = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_CONTENT_ENCODING]; ok { + output.ContentEncoding = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_CONTENT_LANGUAGE]; ok { + output.ContentLanguage = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_CONTENT_TYPE]; ok { + output.ContentType = ret[0] + } +} + +// ParseSetObjectMetadataOutput sets SetObjectMetadataOutput field values with response headers +func ParseSetObjectMetadataOutput(output *SetObjectMetadataOutput) { + if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS]; ok { + output.StorageClass = ParseStringToStorageClassType(ret[0]) + } else if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS2]; ok { + output.StorageClass = ParseStringToStorageClassType(ret[0]) + } + if ret, ok := output.ResponseHeaders[HEADER_METADATA_DIRECTIVE]; ok { + output.MetadataDirective = MetadataDirectiveType(ret[0]) + } + if ret, ok := output.ResponseHeaders[HEADER_CACHE_CONTROL]; ok { + output.CacheControl = ret[0] + } + parseContentHeader(output) + if ret, ok := output.ResponseHeaders[HEADER_EXPIRES]; ok { + output.Expires = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_WEBSITE_REDIRECT_LOCATION]; ok { + output.WebsiteRedirectLocation = ret[0] + } + output.Metadata = make(map[string]string) + + for key, value := range output.ResponseHeaders { + if strings.HasPrefix(key, PREFIX_META) { + _key := key[len(PREFIX_META):] + output.ResponseHeaders[_key] = value + output.Metadata[_key] = value[0] + delete(output.ResponseHeaders, key) + } + } +} + +// ParseDeleteObjectOutput sets DeleteObjectOutput field values with response headers +func ParseDeleteObjectOutput(output *DeleteObjectOutput) { + if versionID, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok { + output.VersionId = versionID[0] + } + + if deleteMarker, ok := output.ResponseHeaders[HEADER_DELETE_MARKER]; ok { + output.DeleteMarker = deleteMarker[0] == "true" + } +} + +// ParseGetObjectOutput sets GetObjectOutput field values with response headers 
+func ParseGetObjectOutput(output *GetObjectOutput) { + ParseGetObjectMetadataOutput(&output.GetObjectMetadataOutput) + if ret, ok := output.ResponseHeaders[HEADER_DELETE_MARKER]; ok { + output.DeleteMarker = ret[0] == "true" + } + if ret, ok := output.ResponseHeaders[HEADER_CACHE_CONTROL]; ok { + output.CacheControl = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_CONTENT_DISPOSITION]; ok { + output.ContentDisposition = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_CONTENT_ENCODING]; ok { + output.ContentEncoding = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_CONTENT_LANGUAGE]; ok { + output.ContentLanguage = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_EXPIRES]; ok { + output.Expires = ret[0] + } +} + +// ConvertRequestToIoReaderV2 converts req to XML data +func ConvertRequestToIoReaderV2(req interface{}) (io.Reader, string, error) { + data, err := TransToXml(req) + if err == nil { + if isDebugLogEnabled() { + doLog(LEVEL_DEBUG, "Do http request with data: %s", string(data)) + } + return bytes.NewReader(data), Base64Md5(data), nil + } + return nil, "", err +} + +// ConvertRequestToIoReader converts req to XML data +func ConvertRequestToIoReader(req interface{}) (io.Reader, error) { + body, err := TransToXml(req) + if err == nil { + if isDebugLogEnabled() { + doLog(LEVEL_DEBUG, "Do http request with data: %s", string(body)) + } + return bytes.NewReader(body), nil + } + return nil, err +} + +// ParseResponseToBaseModel gets response from OBS +func ParseResponseToBaseModel(resp *http.Response, baseModel IBaseModel, xmlResult bool, isObs bool) (err error) { + readCloser, ok := baseModel.(IReadCloser) + if !ok { + defer func() { + errMsg := resp.Body.Close() + if errMsg != nil { + doLog(LEVEL_WARN, "Failed to close response body") + } + }() + body, err := ioutil.ReadAll(resp.Body) + if err == nil && len(body) > 0 { + if xmlResult { + err = ParseXml(body, baseModel) + } else { + s := reflect.TypeOf(baseModel).Elem() + if reflect.TypeOf(baseModel).Elem().Name() == "GetBucketPolicyOutput" { + for i := 0; i < s.NumField(); i++ { + if s.Field(i).Tag == "json:\"body\"" { + reflect.ValueOf(baseModel).Elem().FieldByName(s.Field(i).Name).SetString(string(body)) + break + } + } + } else { + err = parseJSON(body, baseModel) + } + } + if err != nil { + doLog(LEVEL_ERROR, "Unmarshal error: %v", err) + } + } + } else { + readCloser.setReadCloser(resp.Body) + } + + baseModel.setStatusCode(resp.StatusCode) + responseHeaders := cleanHeaderPrefix(resp.Header) + baseModel.setResponseHeaders(responseHeaders) + if values, ok := responseHeaders[HEADER_REQUEST_ID]; ok { + baseModel.setRequestID(values[0]) + } + return +} + +// ParseResponseToObsError gets obsError from OBS +func ParseResponseToObsError(resp *http.Response, isObs bool) error { + isJson := false + if contentType, ok := resp.Header[HEADER_CONTENT_TYPE_CAML]; ok { + jsonType, _ := mimeTypes["json"] + isJson = contentType[0] == jsonType + } + obsError := ObsError{} + respError := ParseResponseToBaseModel(resp, &obsError, !isJson, isObs) + if respError != nil { + doLog(LEVEL_WARN, "Parse response to BaseModel with error: %v", respError) + } + obsError.Status = resp.Status + return obsError +} + +// convertFetchPolicyToJSON converts SetBucketFetchPolicyInput into json format +func convertFetchPolicyToJSON(input SetBucketFetchPolicyInput) (data string, err error) { + fetch := map[string]SetBucketFetchPolicyInput{"fetch": input} + json, err := json.Marshal(fetch) + if err != nil { + return "", err + } + data = 
string(json) + return +} + +// convertFetchJobToJSON converts SetBucketFetchJobInput into json format +func convertFetchJobToJSON(input SetBucketFetchJobInput) (data string, err error) { + objectHeaders := make(map[string]string) + for key, value := range input.ObjectHeaders { + if value != "" { + _key := strings.ToLower(key) + if !strings.HasPrefix(key, HEADER_PREFIX_OBS) { + _key = HEADER_PREFIX_META_OBS + _key + } + objectHeaders[_key] = value + } + } + input.ObjectHeaders = objectHeaders + json, err := json.Marshal(input) + if err != nil { + return "", err + } + data = string(json) + return +} diff --git a/modules/obs/error.go b/modules/obs/error.go new file mode 100755 index 000000000..63cb5bb03 --- /dev/null +++ b/modules/obs/error.go @@ -0,0 +1,35 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +//nolint:golint, unused +package obs + +import ( + "encoding/xml" + "fmt" +) + +// ObsError defines error response from OBS +type ObsError struct { + BaseModel + Status string + XMLName xml.Name `xml:"Error"` + Code string `xml:"Code" json:"code"` + Message string `xml:"Message" json:"message"` + Resource string `xml:"Resource"` + HostId string `xml:"HostId"` +} + +func (err ObsError) Error() string { + return fmt.Sprintf("obs: service returned error: Status=%s, Code=%s, Message=%s, RequestId=%s", + err.Status, err.Code, err.Message, err.RequestId) +} diff --git a/modules/obs/extension.go b/modules/obs/extension.go new file mode 100755 index 000000000..bbf33c56b --- /dev/null +++ b/modules/obs/extension.go @@ -0,0 +1,37 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
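convert.go above bridges the two naming schemes for storage classes: ParseStringToStorageClassType is exported and normalizes both the S3 names (STANDARD_IA, GLACIER) and the OBS names (WARM, COLD) onto the SDK's three classes, while convertTransitionsToXML performs the reverse translation when the client is not in OBS-signature mode. A small demonstration under the assumption that the package imports as shown (the import path is a guess for this fork, not something stated in the diff):

```go
package main

import (
	"fmt"

	"code.gitea.io/gitea/modules/obs" // import path assumed for this fork
)

func main() {
	// Both naming schemes normalize onto the SDK's three storage classes;
	// unrecognized values come back as the empty string.
	fmt.Println(obs.ParseStringToStorageClassType("STANDARD_IA")) // WARM
	fmt.Println(obs.ParseStringToStorageClassType("GLACIER"))     // COLD
	fmt.Println(obs.ParseStringToStorageClassType("STANDARD"))    // STANDARD
	fmt.Println(obs.ParseStringToStorageClassType("UNKNOWN"))     // ""
}
```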
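error.go, also closed above, defines ObsError, which ParseResponseToObsError returns for any service-side failure. A sketch of how calling code can tell service errors apart from transport errors; the obs.New constructor and the GetObjectMetadata call are assumptions about other files of this vendored SDK (only ObsError and GetObjectMetadataInput parsing appear in the hunks above):

```go
package main

import (
	"log"

	"code.gitea.io/gitea/modules/obs" // import path assumed, as above
)

func main() {
	// obs.New and GetObjectMetadata live in other files of the vendored SDK;
	// only the ObsError type itself is defined in the diff above.
	client, err := obs.New("<access-key>", "<secret-key>", "https://obs.example.com")
	if err != nil {
		log.Fatalf("create client: %v", err)
	}

	_, err = client.GetObjectMetadata(&obs.GetObjectMetadataInput{
		Bucket: "some-bucket",
		Key:    "attachment/example.zip",
	})
	if obsErr, ok := err.(obs.ObsError); ok {
		// Service-side failure: ParseResponseToObsError filled these fields
		// from the error body and the request-id response header.
		log.Printf("obs error: status=%s code=%s message=%s request-id=%s",
			obsErr.Status, obsErr.Code, obsErr.Message, obsErr.RequestId)
	} else if err != nil {
		// Transport-level failure (DNS, TLS, timeout, ...).
		log.Printf("transport error: %v", err)
	}
}
```

Because ObsError embeds BaseModel, the same assertion also exposes the cleaned response headers (ResponseHeaders) and the HTTP status when deeper inspection is needed.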
+ +//nolint:golint, unused +package obs + +import ( + "fmt" + "strings" +) + +type extensionOptions interface{} +type extensionHeaders func(headers map[string][]string, isObs bool) error + +func setHeaderPrefix(key string, value string) extensionHeaders { + return func(headers map[string][]string, isObs bool) error { + if strings.TrimSpace(value) == "" { + return fmt.Errorf("set header %s with empty value", key) + } + setHeaders(headers, key, []string{value}, isObs) + return nil + } +} + +// WithReqPaymentHeader sets header for requester-pays +func WithReqPaymentHeader(requester PayerType) extensionHeaders { + return setHeaderPrefix(REQUEST_PAYER, string(requester)) +} diff --git a/modules/obs/http.go b/modules/obs/http.go new file mode 100755 index 000000000..e305c14b5 --- /dev/null +++ b/modules/obs/http.go @@ -0,0 +1,566 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +package obs + +import ( + "bytes" + "errors" + "fmt" + "io" + "math/rand" + "net" + "net/http" + "net/url" + "os" + "strings" + "time" +) + +func prepareHeaders(headers map[string][]string, meta bool, isObs bool) map[string][]string { + _headers := make(map[string][]string, len(headers)) + if headers != nil { + for key, value := range headers { + key = strings.TrimSpace(key) + if key == "" { + continue + } + _key := strings.ToLower(key) + if _, ok := allowedRequestHTTPHeaderMetadataNames[_key]; !ok && !strings.HasPrefix(key, HEADER_PREFIX) && !strings.HasPrefix(key, HEADER_PREFIX_OBS) { + if !meta { + continue + } + if !isObs { + _key = HEADER_PREFIX_META + _key + } else { + _key = HEADER_PREFIX_META_OBS + _key + } + } else { + _key = key + } + _headers[_key] = value + } + } + return _headers +} + +func (obsClient ObsClient) doActionWithoutBucket(action, method string, input ISerializable, output IBaseModel, extensions []extensionOptions) error { + return obsClient.doAction(action, method, "", "", input, output, true, true, extensions) +} + +func (obsClient ObsClient) doActionWithBucketV2(action, method, bucketName string, input ISerializable, output IBaseModel, extensions []extensionOptions) error { + if strings.TrimSpace(bucketName) == "" && !obsClient.conf.cname { + return errors.New("Bucket is empty") + } + return obsClient.doAction(action, method, bucketName, "", input, output, false, true, extensions) +} + +func (obsClient ObsClient) doActionWithBucket(action, method, bucketName string, input ISerializable, output IBaseModel, extensions []extensionOptions) error { + if strings.TrimSpace(bucketName) == "" && !obsClient.conf.cname { + return errors.New("Bucket is empty") + } + return obsClient.doAction(action, method, bucketName, "", input, output, true, true, extensions) +} + +func (obsClient ObsClient) doActionWithBucketAndKey(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, extensions []extensionOptions) error { + return obsClient._doActionWithBucketAndKey(action, method, bucketName, objectKey, input, output, true, extensions) 
+}
+
+func (obsClient ObsClient) doActionWithBucketAndKeyV2(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, extensions []extensionOptions) error {
+	if strings.TrimSpace(bucketName) == "" && !obsClient.conf.cname {
+		return errors.New("Bucket is empty")
+	}
+	if strings.TrimSpace(objectKey) == "" {
+		return errors.New("Key is empty")
+	}
+	return obsClient.doAction(action, method, bucketName, objectKey, input, output, false, true, extensions)
+}
+
+func (obsClient ObsClient) doActionWithBucketAndKeyUnRepeatable(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, extensions []extensionOptions) error {
+	return obsClient._doActionWithBucketAndKey(action, method, bucketName, objectKey, input, output, false, extensions)
+}
+
+func (obsClient ObsClient) _doActionWithBucketAndKey(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, repeatable bool, extensions []extensionOptions) error {
+	if strings.TrimSpace(bucketName) == "" && !obsClient.conf.cname {
+		return errors.New("Bucket is empty")
+	}
+	if strings.TrimSpace(objectKey) == "" {
+		return errors.New("Key is empty")
+	}
+	return obsClient.doAction(action, method, bucketName, objectKey, input, output, true, repeatable, extensions)
+}
+
+func (obsClient ObsClient) doAction(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, xmlResult bool, repeatable bool, extensions []extensionOptions) error {
+
+	var resp *http.Response
+	var respError error
+	doLog(LEVEL_INFO, "Enter method %s...", action)
+	start := GetCurrentTimestamp()
+
+	params, headers, data, err := input.trans(obsClient.conf.signature == SignatureObs)
+	if err != nil {
+		return err
+	}
+
+	if params == nil {
+		params = make(map[string]string)
+	}
+
+	if headers == nil {
+		headers = make(map[string][]string)
+	}
+
+	for _, extension := range extensions {
+		if extensionHeader, ok := extension.(extensionHeaders); ok {
+			_err := extensionHeader(headers, obsClient.conf.signature == SignatureObs)
+			if _err != nil {
+				doLog(LEVEL_WARN, "set header with error: %v", _err)
+			}
+		} else {
+			doLog(LEVEL_WARN, "Unsupported extensionOptions")
+		}
+	}
+
+	switch method {
+	case HTTP_GET:
+		resp, respError = obsClient.doHTTPGet(bucketName, objectKey, params, headers, data, repeatable)
+	case HTTP_POST:
+		resp, respError = obsClient.doHTTPPost(bucketName, objectKey, params, headers, data, repeatable)
+	case HTTP_PUT:
+		resp, respError = obsClient.doHTTPPut(bucketName, objectKey, params, headers, data, repeatable)
+	case HTTP_DELETE:
+		resp, respError = obsClient.doHTTPDelete(bucketName, objectKey, params, headers, data, repeatable)
+	case HTTP_HEAD:
+		resp, respError = obsClient.doHTTPHead(bucketName, objectKey, params, headers, data, repeatable)
+	case HTTP_OPTIONS:
+		resp, respError = obsClient.doHTTPOptions(bucketName, objectKey, params, headers, data, repeatable)
+	default:
+		respError = errors.New("Unexpected http method error")
+	}
+	if respError == nil && output != nil {
+		respError = ParseResponseToBaseModel(resp, output, xmlResult, obsClient.conf.signature == SignatureObs)
+		if respError != nil {
+			doLog(LEVEL_WARN, "Parse response to BaseModel with error: %v", respError)
+		}
+	} else {
+		doLog(LEVEL_WARN, "Do http request with error: %v", respError)
+	}
+
+	if isDebugLogEnabled() {
+		doLog(LEVEL_DEBUG, "End method %s, obsclient cost %d ms", action, (GetCurrentTimestamp() - start))
+	}
+
+	return respError
+}
+
+func (obsClient ObsClient) doHTTPGet(bucketName, objectKey string, params map[string]string,
+	headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
+	return obsClient.doHTTP(HTTP_GET, bucketName, objectKey, params, prepareHeaders(headers, false, obsClient.conf.signature == SignatureObs), data, repeatable)
+}
+
+func (obsClient ObsClient) doHTTPHead(bucketName, objectKey string, params map[string]string,
+	headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
+	return obsClient.doHTTP(HTTP_HEAD, bucketName, objectKey, params, prepareHeaders(headers, false, obsClient.conf.signature == SignatureObs), data, repeatable)
+}
+
+func (obsClient ObsClient) doHTTPOptions(bucketName, objectKey string, params map[string]string,
+	headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
+	return obsClient.doHTTP(HTTP_OPTIONS, bucketName, objectKey, params, prepareHeaders(headers, false, obsClient.conf.signature == SignatureObs), data, repeatable)
+}
+
+func (obsClient ObsClient) doHTTPDelete(bucketName, objectKey string, params map[string]string,
+	headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
+	return obsClient.doHTTP(HTTP_DELETE, bucketName, objectKey, params, prepareHeaders(headers, false, obsClient.conf.signature == SignatureObs), data, repeatable)
+}
+
+func (obsClient ObsClient) doHTTPPut(bucketName, objectKey string, params map[string]string,
+	headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
+	return obsClient.doHTTP(HTTP_PUT, bucketName, objectKey, params, prepareHeaders(headers, true, obsClient.conf.signature == SignatureObs), data, repeatable)
+}
+
+func (obsClient ObsClient) doHTTPPost(bucketName, objectKey string, params map[string]string,
+	headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
+	return obsClient.doHTTP(HTTP_POST, bucketName, objectKey, params, prepareHeaders(headers, true, obsClient.conf.signature == SignatureObs), data, repeatable)
+}
+
+func (obsClient ObsClient) doHTTPWithSignedURL(action, method string, signedURL string, actualSignedRequestHeaders http.Header, data io.Reader, output IBaseModel, xmlResult bool) (respError error) {
+	req, err := http.NewRequest(method, signedURL, data)
+	if err != nil {
+		return err
+	}
+	if obsClient.conf.ctx != nil {
+		req = req.WithContext(obsClient.conf.ctx)
+	}
+	var resp *http.Response
+
+	var isSecurityToken bool
+	var securityToken string
+	var query []string
+	params := strings.Split(signedURL, "?")
+	if len(params) > 1 {
+		query = strings.Split(params[1], "&")
+		for _, value := range query {
+			if strings.HasPrefix(value, HEADER_STS_TOKEN_AMZ+"=") || strings.HasPrefix(value, HEADER_STS_TOKEN_OBS+"=") {
+				if value[len(HEADER_STS_TOKEN_AMZ)+1:] != "" {
+					securityToken = value[len(HEADER_STS_TOKEN_AMZ)+1:]
+					isSecurityToken = true
+				}
+			}
+		}
+	}
+	logSignedURL := signedURL
+	if isSecurityToken {
+		logSignedURL = strings.Replace(logSignedURL, securityToken, "******", -1)
+	}
+	doLog(LEVEL_INFO, "Do %s with signedUrl %s...", action, logSignedURL)
+
+	req.Header = actualSignedRequestHeaders
+	if value, ok := req.Header[HEADER_HOST_CAMEL]; ok {
+		req.Host = value[0]
+		delete(req.Header, HEADER_HOST_CAMEL)
+	} else if value, ok := req.Header[HEADER_HOST]; ok {
+		req.Host = value[0]
+		delete(req.Header, HEADER_HOST)
+	}
+
+	if value, ok := req.Header[HEADER_CONTENT_LENGTH_CAMEL]; ok {
+		req.ContentLength = StringToInt64(value[0], -1)
+
delete(req.Header, HEADER_CONTENT_LENGTH_CAMEL) + } else if value, ok := req.Header[HEADER_CONTENT_LENGTH]; ok { + req.ContentLength = StringToInt64(value[0], -1) + delete(req.Header, HEADER_CONTENT_LENGTH) + } + + req.Header[HEADER_USER_AGENT_CAMEL] = []string{USER_AGENT} + start := GetCurrentTimestamp() + resp, err = obsClient.httpClient.Do(req) + if isInfoLogEnabled() { + doLog(LEVEL_INFO, "Do http request cost %d ms", (GetCurrentTimestamp() - start)) + } + + var msg interface{} + if err != nil { + respError = err + resp = nil + } else { + doLog(LEVEL_DEBUG, "Response headers: %v", resp.Header) + if resp.StatusCode >= 300 { + respError = ParseResponseToObsError(resp, obsClient.conf.signature == SignatureObs) + msg = resp.Status + resp = nil + } else { + if output != nil { + respError = ParseResponseToBaseModel(resp, output, xmlResult, obsClient.conf.signature == SignatureObs) + } + if respError != nil { + doLog(LEVEL_WARN, "Parse response to BaseModel with error: %v", respError) + } + } + } + + if msg != nil { + doLog(LEVEL_ERROR, "Failed to send request with reason:%v", msg) + } + + if isDebugLogEnabled() { + doLog(LEVEL_DEBUG, "End method %s, obsclient cost %d ms", action, (GetCurrentTimestamp() - start)) + } + + return +} + +func (obsClient ObsClient) doHTTP(method, bucketName, objectKey string, params map[string]string, + headers map[string][]string, data interface{}, repeatable bool) (resp *http.Response, respError error) { + + bucketName = strings.TrimSpace(bucketName) + + method = strings.ToUpper(method) + + var redirectURL string + var requestURL string + maxRetryCount := obsClient.conf.maxRetryCount + maxRedirectCount := obsClient.conf.maxRedirectCount + + var _data io.Reader + if data != nil { + if dataStr, ok := data.(string); ok { + doLog(LEVEL_DEBUG, "Do http request with string: %s", dataStr) + headers["Content-Length"] = []string{IntToString(len(dataStr))} + _data = strings.NewReader(dataStr) + } else if dataByte, ok := data.([]byte); ok { + doLog(LEVEL_DEBUG, "Do http request with byte array") + headers["Content-Length"] = []string{IntToString(len(dataByte))} + _data = bytes.NewReader(dataByte) + } else if dataReader, ok := data.(io.Reader); ok { + _data = dataReader + } else { + doLog(LEVEL_WARN, "Data is not a valid io.Reader") + return nil, errors.New("Data is not a valid io.Reader") + } + } + + var lastRequest *http.Request + redirectFlag := false + for i, redirectCount := 0, 0; i <= maxRetryCount; i++ { + if redirectURL != "" { + if !redirectFlag { + parsedRedirectURL, err := url.Parse(redirectURL) + if err != nil { + return nil, err + } + requestURL, err = obsClient.doAuth(method, bucketName, objectKey, params, headers, parsedRedirectURL.Host) + if err != nil { + return nil, err + } + if parsedRequestURL, err := url.Parse(requestURL); err != nil { + return nil, err + } else if parsedRequestURL.RawQuery != "" && parsedRedirectURL.RawQuery == "" { + redirectURL += "?" 
+ parsedRequestURL.RawQuery
+				}
+			}
+			requestURL = redirectURL
+		} else {
+			var err error
+			requestURL, err = obsClient.doAuth(method, bucketName, objectKey, params, headers, "")
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		req, err := http.NewRequest(method, requestURL, _data)
+		if err != nil {
+			return nil, err
+		}
+		if obsClient.conf.ctx != nil {
+			req = req.WithContext(obsClient.conf.ctx)
+		}
+		doLog(LEVEL_DEBUG, "Do request with url [%s] and method [%s]", requestURL, method)
+
+		if isDebugLogEnabled() {
+			auth := headers[HEADER_AUTH_CAMEL]
+			delete(headers, HEADER_AUTH_CAMEL)
+
+			var isSecurityToken bool
+			var securityToken []string
+			if securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_AMZ]; isSecurityToken {
+				headers[HEADER_STS_TOKEN_AMZ] = []string{"******"}
+			} else if securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_OBS]; isSecurityToken {
+				headers[HEADER_STS_TOKEN_OBS] = []string{"******"}
+			}
+			doLog(LEVEL_DEBUG, "Request headers: %v", headers)
+			headers[HEADER_AUTH_CAMEL] = auth
+			if isSecurityToken {
+				if obsClient.conf.signature == SignatureObs {
+					headers[HEADER_STS_TOKEN_OBS] = securityToken
+				} else {
+					headers[HEADER_STS_TOKEN_AMZ] = securityToken
+				}
+			}
+		}
+
+		for key, value := range headers {
+			if key == HEADER_HOST_CAMEL {
+				req.Host = value[0]
+				delete(headers, key)
+			} else if key == HEADER_CONTENT_LENGTH_CAMEL {
+				req.ContentLength = StringToInt64(value[0], -1)
+				delete(headers, key)
+			} else {
+				req.Header[key] = value
+			}
+		}
+
+		lastRequest = req
+
+		req.Header[HEADER_USER_AGENT_CAMEL] = []string{USER_AGENT}
+
+		if lastRequest != nil {
+			req.Host = lastRequest.Host
+			req.ContentLength = lastRequest.ContentLength
+		}
+
+		start := GetCurrentTimestamp()
+		resp, err = obsClient.httpClient.Do(req)
+		if isInfoLogEnabled() {
+			doLog(LEVEL_INFO, "Do http request cost %d ms", (GetCurrentTimestamp() - start))
+		}
+
+		var msg interface{}
+		if err != nil {
+			msg = err
+			respError = err
+			resp = nil
+			if !repeatable {
+				break
+			}
+		} else {
+			doLog(LEVEL_DEBUG, "Response headers: %v", resp.Header)
+			if resp.StatusCode < 300 {
+				break
+			} else if !repeatable || (resp.StatusCode >= 400 && resp.StatusCode < 500) || resp.StatusCode == 304 {
+				respError = ParseResponseToObsError(resp, obsClient.conf.signature == SignatureObs)
+				resp = nil
+				break
+			} else if resp.StatusCode >= 300 && resp.StatusCode < 400 {
+				if location := resp.Header.Get(HEADER_LOCATION_CAMEL); location != "" && redirectCount < maxRedirectCount {
+					redirectURL = location
+					doLog(LEVEL_WARN, "Redirect request to %s", redirectURL)
+					msg = resp.Status
+					maxRetryCount++
+					redirectCount++
+					if resp.StatusCode == 302 && method == HTTP_GET {
+						redirectFlag = true
+					} else {
+						redirectFlag = false
+					}
+				} else {
+					respError = ParseResponseToObsError(resp, obsClient.conf.signature == SignatureObs)
+					resp = nil
+					break
+				}
+			} else {
+				msg = resp.Status
+			}
+		}
+		if i != maxRetryCount {
+			if resp != nil {
+				_err := resp.Body.Close()
+				if _err != nil {
+					doLog(LEVEL_WARN, "Failed to close resp body")
+				}
+				resp = nil
+			}
+			if _, ok := headers[HEADER_AUTH_CAMEL]; ok {
+				delete(headers, HEADER_AUTH_CAMEL)
+			}
+			doLog(LEVEL_WARN, "Failed to send request with reason:%v, will try again", msg)
+			if r, ok := _data.(*strings.Reader); ok {
+				_, err := r.Seek(0, 0)
+				if err != nil {
+					return nil, err
+				}
+			} else if r, ok := _data.(*bytes.Reader); ok {
+				_, err := r.Seek(0, 0)
+				if err != nil {
+					return nil, err
+				}
+			} else if r, ok := _data.(*fileReaderWrapper); ok {
+				fd, err := os.Open(r.filePath)
+				if
err != nil { + return nil, err + } + defer func() { + errMsg := fd.Close() + if errMsg != nil { + doLog(LEVEL_WARN, "Failed to close with reason: %v", errMsg) + } + }() + fileReaderWrapper := &fileReaderWrapper{filePath: r.filePath} + fileReaderWrapper.mark = r.mark + fileReaderWrapper.reader = fd + fileReaderWrapper.totalCount = r.totalCount + _data = fileReaderWrapper + _, err = fd.Seek(r.mark, 0) + if err != nil { + return nil, err + } + } else if r, ok := _data.(*readerWrapper); ok { + _, err := r.seek(0, 0) + if err != nil { + return nil, err + } + } + time.Sleep(time.Duration(float64(i+2) * rand.Float64() * float64(time.Second))) + } else { + doLog(LEVEL_ERROR, "Failed to send request with reason:%v", msg) + if resp != nil { + respError = ParseResponseToObsError(resp, obsClient.conf.signature == SignatureObs) + resp = nil + } + } + } + return +} + +type connDelegate struct { + conn net.Conn + socketTimeout time.Duration + finalTimeout time.Duration +} + +func getConnDelegate(conn net.Conn, socketTimeout int, finalTimeout int) *connDelegate { + return &connDelegate{ + conn: conn, + socketTimeout: time.Second * time.Duration(socketTimeout), + finalTimeout: time.Second * time.Duration(finalTimeout), + } +} + +func (delegate *connDelegate) Read(b []byte) (n int, err error) { + setReadDeadlineErr := delegate.SetReadDeadline(time.Now().Add(delegate.socketTimeout)) + flag := isDebugLogEnabled() + + if setReadDeadlineErr != nil && flag { + doLog(LEVEL_DEBUG, "Failed to set read deadline with reason: %v, but it's ok", setReadDeadlineErr) + } + + n, err = delegate.conn.Read(b) + setReadDeadlineErr = delegate.SetReadDeadline(time.Now().Add(delegate.finalTimeout)) + if setReadDeadlineErr != nil && flag { + doLog(LEVEL_DEBUG, "Failed to set read deadline with reason: %v, but it's ok", setReadDeadlineErr) + } + return n, err +} + +func (delegate *connDelegate) Write(b []byte) (n int, err error) { + setWriteDeadlineErr := delegate.SetWriteDeadline(time.Now().Add(delegate.socketTimeout)) + flag := isDebugLogEnabled() + if setWriteDeadlineErr != nil && flag { + doLog(LEVEL_DEBUG, "Failed to set write deadline with reason: %v, but it's ok", setWriteDeadlineErr) + } + + n, err = delegate.conn.Write(b) + finalTimeout := time.Now().Add(delegate.finalTimeout) + setWriteDeadlineErr = delegate.SetWriteDeadline(finalTimeout) + if setWriteDeadlineErr != nil && flag { + doLog(LEVEL_DEBUG, "Failed to set write deadline with reason: %v, but it's ok", setWriteDeadlineErr) + } + setReadDeadlineErr := delegate.SetReadDeadline(finalTimeout) + if setReadDeadlineErr != nil && flag { + doLog(LEVEL_DEBUG, "Failed to set read deadline with reason: %v, but it's ok", setReadDeadlineErr) + } + return n, err +} + +func (delegate *connDelegate) Close() error { + return delegate.conn.Close() +} + +func (delegate *connDelegate) LocalAddr() net.Addr { + return delegate.conn.LocalAddr() +} + +func (delegate *connDelegate) RemoteAddr() net.Addr { + return delegate.conn.RemoteAddr() +} + +func (delegate *connDelegate) SetDeadline(t time.Time) error { + return delegate.conn.SetDeadline(t) +} + +func (delegate *connDelegate) SetReadDeadline(t time.Time) error { + return delegate.conn.SetReadDeadline(t) +} + +func (delegate *connDelegate) SetWriteDeadline(t time.Time) error { + return delegate.conn.SetWriteDeadline(t) +} diff --git a/modules/obs/log.go b/modules/obs/log.go new file mode 100755 index 000000000..8938e5e40 --- /dev/null +++ b/modules/obs/log.go @@ -0,0 +1,317 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. 
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+//nolint:golint, unused
+package obs
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"sync"
+)
+
+// Level defines the level of the log
+type Level int
+
+const (
+	LEVEL_OFF   Level = 500
+	LEVEL_ERROR Level = 400
+	LEVEL_WARN  Level = 300
+	LEVEL_INFO  Level = 200
+	LEVEL_DEBUG Level = 100
+)
+
+var logLevelMap = map[Level]string{
+	LEVEL_OFF:   "[OFF]: ",
+	LEVEL_ERROR: "[ERROR]: ",
+	LEVEL_WARN:  "[WARN]: ",
+	LEVEL_INFO:  "[INFO]: ",
+	LEVEL_DEBUG: "[DEBUG]: ",
+}
+
+type logConfType struct {
+	level        Level
+	logToConsole bool
+	logFullPath  string
+	maxLogSize   int64
+	backups      int
+}
+
+func getDefaultLogConf() logConfType {
+	return logConfType{
+		level:        LEVEL_WARN,
+		logToConsole: false,
+		logFullPath:  "",
+		maxLogSize:   1024 * 1024 * 30, //30MB
+		backups:      10,
+	}
+}
+
+var logConf logConfType
+
+type loggerWrapper struct {
+	fullPath   string
+	fd         *os.File
+	ch         chan string
+	wg         sync.WaitGroup
+	queue      []string
+	logger     *log.Logger
+	index      int
+	cacheCount int
+	closed     bool
+}
+
+func (lw *loggerWrapper) doInit() {
+	lw.queue = make([]string, 0, lw.cacheCount)
+	lw.logger = log.New(lw.fd, "", 0)
+	lw.ch = make(chan string, lw.cacheCount)
+	lw.wg.Add(1)
+	go lw.doWrite()
+}
+
+func (lw *loggerWrapper) rotate() {
+	stat, err := lw.fd.Stat()
+	if err != nil {
+		_err := lw.fd.Close()
+		if _err != nil {
+			doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err)
+		}
+		panic(err)
+	}
+	if stat.Size() >= logConf.maxLogSize {
+		_err := lw.fd.Sync()
+		if _err != nil {
+			panic(_err)
+		}
+		_err = lw.fd.Close()
+		if _err != nil {
+			doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err)
+		}
+		if lw.index > logConf.backups {
+			lw.index = 1
+		}
+		_err = os.Rename(lw.fullPath, lw.fullPath+"."+IntToString(lw.index))
+		if _err != nil {
+			panic(_err)
+		}
+		lw.index++
+
+		fd, err := os.OpenFile(lw.fullPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
+		if err != nil {
+			panic(err)
+		}
+		lw.fd = fd
+		lw.logger.SetOutput(lw.fd)
+	}
+}
+
+func (lw *loggerWrapper) doFlush() {
+	lw.rotate()
+	for _, m := range lw.queue {
+		lw.logger.Println(m)
+	}
+	err := lw.fd.Sync()
+	if err != nil {
+		panic(err)
+	}
+}
+
+func (lw *loggerWrapper) doClose() {
+	lw.closed = true
+	close(lw.ch)
+	lw.wg.Wait()
+}
+
+func (lw *loggerWrapper) doWrite() {
+	defer lw.wg.Done()
+	for {
+		msg, ok := <-lw.ch
+		if !ok {
+			lw.doFlush()
+			_err := lw.fd.Close()
+			if _err != nil {
+				doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err)
+			}
+			break
+		}
+		if len(lw.queue) >= lw.cacheCount {
+			lw.doFlush()
+			lw.queue = make([]string, 0, lw.cacheCount)
+		}
+		lw.queue = append(lw.queue, msg)
+	}
+}
+
+func (lw *loggerWrapper) Printf(format string, v ...interface{}) {
+	if !lw.closed {
+		msg := fmt.Sprintf(format, v...)
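+		// hand the formatted message to the background writer goroutine; doWrite batches entries and flushes once cacheCount messages accumulate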
+ lw.ch <- msg + } +} + +var consoleLogger *log.Logger +var fileLogger *loggerWrapper +var lock = new(sync.RWMutex) + +func isDebugLogEnabled() bool { + return logConf.level <= LEVEL_DEBUG +} + +func isErrorLogEnabled() bool { + return logConf.level <= LEVEL_ERROR +} + +func isWarnLogEnabled() bool { + return logConf.level <= LEVEL_WARN +} + +func isInfoLogEnabled() bool { + return logConf.level <= LEVEL_INFO +} + +func reset() { + if fileLogger != nil { + fileLogger.doClose() + fileLogger = nil + } + consoleLogger = nil + logConf = getDefaultLogConf() +} + +// InitLog enable logging function with default cacheCnt +func InitLog(logFullPath string, maxLogSize int64, backups int, level Level, logToConsole bool) error { + return InitLogWithCacheCnt(logFullPath, maxLogSize, backups, level, logToConsole, 50) +} + +// InitLogWithCacheCnt enable logging function +func InitLogWithCacheCnt(logFullPath string, maxLogSize int64, backups int, level Level, logToConsole bool, cacheCnt int) error { + lock.Lock() + defer lock.Unlock() + if cacheCnt <= 0 { + cacheCnt = 50 + } + reset() + if fullPath := strings.TrimSpace(logFullPath); fullPath != "" { + _fullPath, err := filepath.Abs(fullPath) + if err != nil { + return err + } + + if !strings.HasSuffix(_fullPath, ".log") { + _fullPath += ".log" + } + + stat, err := os.Stat(_fullPath) + if err == nil && stat.IsDir() { + return fmt.Errorf("logFullPath:[%s] is a directory", _fullPath) + } else if err = os.MkdirAll(filepath.Dir(_fullPath), os.ModePerm); err != nil { + return err + } + + fd, err := os.OpenFile(_fullPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600) + if err != nil { + return err + } + + if stat == nil { + stat, err = os.Stat(_fullPath) + if err != nil { + _err := fd.Close() + if _err != nil { + doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err) + } + return err + } + } + + prefix := stat.Name() + "." + index := 1 + var timeIndex int64 = 0 + walkFunc := func(path string, info os.FileInfo, err error) error { + if err == nil { + if name := info.Name(); strings.HasPrefix(name, prefix) { + if i := StringToInt(name[len(prefix):], 0); i >= index && info.ModTime().Unix() >= timeIndex { + timeIndex = info.ModTime().Unix() + index = i + 1 + } + } + } + return err + } + + if err = filepath.Walk(filepath.Dir(_fullPath), walkFunc); err != nil { + _err := fd.Close() + if _err != nil { + doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err) + } + return err + } + + fileLogger = &loggerWrapper{fullPath: _fullPath, fd: fd, index: index, cacheCount: cacheCnt, closed: false} + fileLogger.doInit() + } + if maxLogSize > 0 { + logConf.maxLogSize = maxLogSize + } + if backups > 0 { + logConf.backups = backups + } + logConf.level = level + if logToConsole { + consoleLogger = log.New(os.Stdout, "", log.LstdFlags) + } + return nil +} + +// CloseLog disable logging and synchronize cache data to log files +func CloseLog() { + if logEnabled() { + lock.Lock() + defer lock.Unlock() + reset() + } +} + +func logEnabled() bool { + return consoleLogger != nil || fileLogger != nil +} + +// DoLog writes log messages to the logger +func DoLog(level Level, format string, v ...interface{}) { + doLog(level, format, v...) +} + +func doLog(level Level, format string, v ...interface{}) { + if logEnabled() && logConf.level <= level { + msg := fmt.Sprintf(format, v...) 
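+		// prefix the message with the caller's file name and line number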
+ if _, file, line, ok := runtime.Caller(1); ok { + index := strings.LastIndex(file, "/") + if index >= 0 { + file = file[index+1:] + } + msg = fmt.Sprintf("%s:%d|%s", file, line, msg) + } + prefix := logLevelMap[level] + if consoleLogger != nil { + consoleLogger.Printf("%s%s", prefix, msg) + } + if fileLogger != nil { + nowDate := FormatUtcNow("2006-01-02T15:04:05Z") + fileLogger.Printf("%s %s%s", nowDate, prefix, msg) + } + } +} diff --git a/modules/obs/model.go b/modules/obs/model.go new file mode 100755 index 000000000..8752b5198 --- /dev/null +++ b/modules/obs/model.go @@ -0,0 +1,1236 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +//nolint:golint, unused +package obs + +import ( + "encoding/xml" + "io" + "net/http" + "time" +) + +// BaseModel defines base model response from OBS +type BaseModel struct { + StatusCode int `xml:"-"` + RequestId string `xml:"RequestId" json:"request_id"` + ResponseHeaders map[string][]string `xml:"-"` +} + +// Bucket defines bucket properties +type Bucket struct { + XMLName xml.Name `xml:"Bucket"` + Name string `xml:"Name"` + CreationDate time.Time `xml:"CreationDate"` + Location string `xml:"Location"` +} + +// Owner defines owner properties +type Owner struct { + XMLName xml.Name `xml:"Owner"` + ID string `xml:"ID"` + DisplayName string `xml:"DisplayName,omitempty"` +} + +// Initiator defines initiator properties +type Initiator struct { + XMLName xml.Name `xml:"Initiator"` + ID string `xml:"ID"` + DisplayName string `xml:"DisplayName,omitempty"` +} + +// ListBucketsInput is the input parameter of ListBuckets function +type ListBucketsInput struct { + QueryLocation bool +} + +// ListBucketsOutput is the result of ListBuckets function +type ListBucketsOutput struct { + BaseModel + XMLName xml.Name `xml:"ListAllMyBucketsResult"` + Owner Owner `xml:"Owner"` + Buckets []Bucket `xml:"Buckets>Bucket"` +} + +type bucketLocationObs struct { + XMLName xml.Name `xml:"Location"` + Location string `xml:",chardata"` +} + +// BucketLocation defines bucket location configuration +type BucketLocation struct { + XMLName xml.Name `xml:"CreateBucketConfiguration"` + Location string `xml:"LocationConstraint,omitempty"` +} + +// CreateBucketInput is the input parameter of CreateBucket function +type CreateBucketInput struct { + BucketLocation + Bucket string `xml:"-"` + ACL AclType `xml:"-"` + StorageClass StorageClassType `xml:"-"` + GrantReadId string `xml:"-"` + GrantWriteId string `xml:"-"` + GrantReadAcpId string `xml:"-"` + GrantWriteAcpId string `xml:"-"` + GrantFullControlId string `xml:"-"` + GrantReadDeliveredId string `xml:"-"` + GrantFullControlDeliveredId string `xml:"-"` + Epid string `xml:"-"` + AvailableZone string `xml:"-"` +} + +// BucketStoragePolicy defines the bucket storage class +type BucketStoragePolicy struct { + XMLName xml.Name `xml:"StoragePolicy"` + StorageClass StorageClassType `xml:"DefaultStorageClass"` +} + +// SetBucketStoragePolicyInput is the input parameter of SetBucketStoragePolicy 
function +type SetBucketStoragePolicyInput struct { + Bucket string `xml:"-"` + BucketStoragePolicy +} + +type getBucketStoragePolicyOutputS3 struct { + BaseModel + BucketStoragePolicy +} + +// GetBucketStoragePolicyOutput is the result of GetBucketStoragePolicy function +type GetBucketStoragePolicyOutput struct { + BaseModel + StorageClass string +} + +type bucketStoragePolicyObs struct { + XMLName xml.Name `xml:"StorageClass"` + StorageClass string `xml:",chardata"` +} +type getBucketStoragePolicyOutputObs struct { + BaseModel + bucketStoragePolicyObs +} + +// ListObjsInput defines parameters for listing objects +type ListObjsInput struct { + Prefix string + MaxKeys int + Delimiter string + Origin string + RequestHeader string +} + +// ListObjectsInput is the input parameter of ListObjects function +type ListObjectsInput struct { + ListObjsInput + Bucket string + Marker string +} + +// Content defines the object content properties +type Content struct { + XMLName xml.Name `xml:"Contents"` + Owner Owner `xml:"Owner"` + ETag string `xml:"ETag"` + Key string `xml:"Key"` + LastModified time.Time `xml:"LastModified"` + Size int64 `xml:"Size"` + StorageClass StorageClassType `xml:"StorageClass"` +} + +// ListObjectsOutput is the result of ListObjects function +type ListObjectsOutput struct { + BaseModel + XMLName xml.Name `xml:"ListBucketResult"` + Delimiter string `xml:"Delimiter"` + IsTruncated bool `xml:"IsTruncated"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker"` + MaxKeys int `xml:"MaxKeys"` + Name string `xml:"Name"` + Prefix string `xml:"Prefix"` + Contents []Content `xml:"Contents"` + CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` + Location string `xml:"-"` +} + +// ListVersionsInput is the input parameter of ListVersions function +type ListVersionsInput struct { + ListObjsInput + Bucket string + KeyMarker string + VersionIdMarker string +} + +// Version defines the properties of versioning objects +type Version struct { + DeleteMarker + XMLName xml.Name `xml:"Version"` + ETag string `xml:"ETag"` + Size int64 `xml:"Size"` +} + +// DeleteMarker defines the properties of versioning delete markers +type DeleteMarker struct { + XMLName xml.Name `xml:"DeleteMarker"` + Key string `xml:"Key"` + VersionId string `xml:"VersionId"` + IsLatest bool `xml:"IsLatest"` + LastModified time.Time `xml:"LastModified"` + Owner Owner `xml:"Owner"` + StorageClass StorageClassType `xml:"StorageClass"` +} + +// ListVersionsOutput is the result of ListVersions function +type ListVersionsOutput struct { + BaseModel + XMLName xml.Name `xml:"ListVersionsResult"` + Delimiter string `xml:"Delimiter"` + IsTruncated bool `xml:"IsTruncated"` + KeyMarker string `xml:"KeyMarker"` + NextKeyMarker string `xml:"NextKeyMarker"` + VersionIdMarker string `xml:"VersionIdMarker"` + NextVersionIdMarker string `xml:"NextVersionIdMarker"` + MaxKeys int `xml:"MaxKeys"` + Name string `xml:"Name"` + Prefix string `xml:"Prefix"` + Versions []Version `xml:"Version"` + DeleteMarkers []DeleteMarker `xml:"DeleteMarker"` + CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` + Location string `xml:"-"` +} + +// ListMultipartUploadsInput is the input parameter of ListMultipartUploads function +type ListMultipartUploadsInput struct { + Bucket string + Prefix string + MaxUploads int + Delimiter string + KeyMarker string + UploadIdMarker string +} + +// Upload defines multipart upload properties +type Upload struct { + XMLName xml.Name `xml:"Upload"` + Key string `xml:"Key"` + UploadId string `xml:"UploadId"` + 
Initiated time.Time `xml:"Initiated"` + StorageClass StorageClassType `xml:"StorageClass"` + Owner Owner `xml:"Owner"` + Initiator Initiator `xml:"Initiator"` +} + +// ListMultipartUploadsOutput is the result of ListMultipartUploads function +type ListMultipartUploadsOutput struct { + BaseModel + XMLName xml.Name `xml:"ListMultipartUploadsResult"` + Bucket string `xml:"Bucket"` + KeyMarker string `xml:"KeyMarker"` + NextKeyMarker string `xml:"NextKeyMarker"` + UploadIdMarker string `xml:"UploadIdMarker"` + NextUploadIdMarker string `xml:"NextUploadIdMarker"` + Delimiter string `xml:"Delimiter"` + IsTruncated bool `xml:"IsTruncated"` + MaxUploads int `xml:"MaxUploads"` + Prefix string `xml:"Prefix"` + Uploads []Upload `xml:"Upload"` + CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` +} + +// BucketQuota defines bucket quota configuration +type BucketQuota struct { + XMLName xml.Name `xml:"Quota"` + Quota int64 `xml:"StorageQuota"` +} + +// SetBucketQuotaInput is the input parameter of SetBucketQuota function +type SetBucketQuotaInput struct { + Bucket string `xml:"-"` + BucketQuota +} + +// GetBucketQuotaOutput is the result of GetBucketQuota function +type GetBucketQuotaOutput struct { + BaseModel + BucketQuota +} + +// GetBucketStorageInfoOutput is the result of GetBucketStorageInfo function +type GetBucketStorageInfoOutput struct { + BaseModel + XMLName xml.Name `xml:"GetBucketStorageInfoResult"` + Size int64 `xml:"Size"` + ObjectNumber int `xml:"ObjectNumber"` +} + +type getBucketLocationOutputS3 struct { + BaseModel + BucketLocation +} +type getBucketLocationOutputObs struct { + BaseModel + bucketLocationObs +} + +// GetBucketLocationOutput is the result of GetBucketLocation function +type GetBucketLocationOutput struct { + BaseModel + Location string `xml:"-"` +} + +// Grantee defines grantee properties +type Grantee struct { + XMLName xml.Name `xml:"Grantee"` + Type GranteeType `xml:"type,attr"` + ID string `xml:"ID,omitempty"` + DisplayName string `xml:"DisplayName,omitempty"` + URI GroupUriType `xml:"URI,omitempty"` +} + +type granteeObs struct { + XMLName xml.Name `xml:"Grantee"` + Type GranteeType `xml:"type,attr"` + ID string `xml:"ID,omitempty"` + DisplayName string `xml:"DisplayName,omitempty"` + Canned string `xml:"Canned,omitempty"` +} + +// Grant defines grant properties +type Grant struct { + XMLName xml.Name `xml:"Grant"` + Grantee Grantee `xml:"Grantee"` + Permission PermissionType `xml:"Permission"` + Delivered bool `xml:"Delivered"` +} +type grantObs struct { + XMLName xml.Name `xml:"Grant"` + Grantee granteeObs `xml:"Grantee"` + Permission PermissionType `xml:"Permission"` + Delivered bool `xml:"Delivered"` +} + +// AccessControlPolicy defines access control policy properties +type AccessControlPolicy struct { + XMLName xml.Name `xml:"AccessControlPolicy"` + Owner Owner `xml:"Owner"` + Grants []Grant `xml:"AccessControlList>Grant"` + Delivered string `xml:"Delivered,omitempty"` +} + +type accessControlPolicyObs struct { + XMLName xml.Name `xml:"AccessControlPolicy"` + Owner Owner `xml:"Owner"` + Grants []grantObs `xml:"AccessControlList>Grant"` +} + +// GetBucketAclOutput is the result of GetBucketAcl function +type GetBucketAclOutput struct { + BaseModel + AccessControlPolicy +} + +type getBucketACLOutputObs struct { + BaseModel + accessControlPolicyObs +} + +// SetBucketAclInput is the input parameter of SetBucketAcl function +type SetBucketAclInput struct { + Bucket string `xml:"-"` + ACL AclType `xml:"-"` + AccessControlPolicy +} + +// SetBucketPolicyInput is 
the input parameter of SetBucketPolicy function +type SetBucketPolicyInput struct { + Bucket string + Policy string +} + +// GetBucketPolicyOutput is the result of GetBucketPolicy function +type GetBucketPolicyOutput struct { + BaseModel + Policy string `json:"body"` +} + +// CorsRule defines the CORS rules +type CorsRule struct { + XMLName xml.Name `xml:"CORSRule"` + ID string `xml:"ID,omitempty"` + AllowedOrigin []string `xml:"AllowedOrigin"` + AllowedMethod []string `xml:"AllowedMethod"` + AllowedHeader []string `xml:"AllowedHeader,omitempty"` + MaxAgeSeconds int `xml:"MaxAgeSeconds"` + ExposeHeader []string `xml:"ExposeHeader,omitempty"` +} + +// BucketCors defines the bucket CORS configuration +type BucketCors struct { + XMLName xml.Name `xml:"CORSConfiguration"` + CorsRules []CorsRule `xml:"CORSRule"` +} + +// SetBucketCorsInput is the input parameter of SetBucketCors function +type SetBucketCorsInput struct { + Bucket string `xml:"-"` + BucketCors +} + +// GetBucketCorsOutput is the result of GetBucketCors function +type GetBucketCorsOutput struct { + BaseModel + BucketCors +} + +// BucketVersioningConfiguration defines the versioning configuration +type BucketVersioningConfiguration struct { + XMLName xml.Name `xml:"VersioningConfiguration"` + Status VersioningStatusType `xml:"Status"` +} + +// SetBucketVersioningInput is the input parameter of SetBucketVersioning function +type SetBucketVersioningInput struct { + Bucket string `xml:"-"` + BucketVersioningConfiguration +} + +// GetBucketVersioningOutput is the result of GetBucketVersioning function +type GetBucketVersioningOutput struct { + BaseModel + BucketVersioningConfiguration +} + +// IndexDocument defines the default page configuration +type IndexDocument struct { + Suffix string `xml:"Suffix"` +} + +// ErrorDocument defines the error page configuration +type ErrorDocument struct { + Key string `xml:"Key,omitempty"` +} + +// Condition defines condition in RoutingRule +type Condition struct { + XMLName xml.Name `xml:"Condition"` + KeyPrefixEquals string `xml:"KeyPrefixEquals,omitempty"` + HttpErrorCodeReturnedEquals string `xml:"HttpErrorCodeReturnedEquals,omitempty"` +} + +// Redirect defines redirect in RoutingRule +type Redirect struct { + XMLName xml.Name `xml:"Redirect"` + Protocol ProtocolType `xml:"Protocol,omitempty"` + HostName string `xml:"HostName,omitempty"` + ReplaceKeyPrefixWith string `xml:"ReplaceKeyPrefixWith,omitempty"` + ReplaceKeyWith string `xml:"ReplaceKeyWith,omitempty"` + HttpRedirectCode string `xml:"HttpRedirectCode,omitempty"` +} + +// RoutingRule defines routing rules +type RoutingRule struct { + XMLName xml.Name `xml:"RoutingRule"` + Condition Condition `xml:"Condition,omitempty"` + Redirect Redirect `xml:"Redirect"` +} + +// RedirectAllRequestsTo defines redirect in BucketWebsiteConfiguration +type RedirectAllRequestsTo struct { + XMLName xml.Name `xml:"RedirectAllRequestsTo"` + Protocol ProtocolType `xml:"Protocol,omitempty"` + HostName string `xml:"HostName"` +} + +// BucketWebsiteConfiguration defines the bucket website configuration +type BucketWebsiteConfiguration struct { + XMLName xml.Name `xml:"WebsiteConfiguration"` + RedirectAllRequestsTo RedirectAllRequestsTo `xml:"RedirectAllRequestsTo,omitempty"` + IndexDocument IndexDocument `xml:"IndexDocument,omitempty"` + ErrorDocument ErrorDocument `xml:"ErrorDocument,omitempty"` + RoutingRules []RoutingRule `xml:"RoutingRules>RoutingRule,omitempty"` +} + +// SetBucketWebsiteConfigurationInput is the input parameter of 
SetBucketWebsiteConfiguration function +type SetBucketWebsiteConfigurationInput struct { + Bucket string `xml:"-"` + BucketWebsiteConfiguration +} + +// GetBucketWebsiteConfigurationOutput is the result of GetBucketWebsiteConfiguration function +type GetBucketWebsiteConfigurationOutput struct { + BaseModel + BucketWebsiteConfiguration +} + +// GetBucketMetadataInput is the input parameter of GetBucketMetadata function +type GetBucketMetadataInput struct { + Bucket string + Origin string + RequestHeader string +} + +// SetObjectMetadataInput is the input parameter of SetObjectMetadata function +type SetObjectMetadataInput struct { + Bucket string + Key string + VersionId string + MetadataDirective MetadataDirectiveType + CacheControl string + ContentDisposition string + ContentEncoding string + ContentLanguage string + ContentType string + Expires string + WebsiteRedirectLocation string + StorageClass StorageClassType + Metadata map[string]string +} + +//SetObjectMetadataOutput is the result of SetObjectMetadata function +type SetObjectMetadataOutput struct { + BaseModel + MetadataDirective MetadataDirectiveType + CacheControl string + ContentDisposition string + ContentEncoding string + ContentLanguage string + ContentType string + Expires string + WebsiteRedirectLocation string + StorageClass StorageClassType + Metadata map[string]string +} + +// GetBucketMetadataOutput is the result of GetBucketMetadata function +type GetBucketMetadataOutput struct { + BaseModel + StorageClass StorageClassType + Location string + Version string + AllowOrigin string + AllowMethod string + AllowHeader string + MaxAgeSeconds int + ExposeHeader string + Epid string +} + +// BucketLoggingStatus defines the bucket logging configuration +type BucketLoggingStatus struct { + XMLName xml.Name `xml:"BucketLoggingStatus"` + Agency string `xml:"Agency,omitempty"` + TargetBucket string `xml:"LoggingEnabled>TargetBucket,omitempty"` + TargetPrefix string `xml:"LoggingEnabled>TargetPrefix,omitempty"` + TargetGrants []Grant `xml:"LoggingEnabled>TargetGrants>Grant,omitempty"` +} + +// SetBucketLoggingConfigurationInput is the input parameter of SetBucketLoggingConfiguration function +type SetBucketLoggingConfigurationInput struct { + Bucket string `xml:"-"` + BucketLoggingStatus +} + +// GetBucketLoggingConfigurationOutput is the result of GetBucketLoggingConfiguration function +type GetBucketLoggingConfigurationOutput struct { + BaseModel + BucketLoggingStatus +} + +// Transition defines transition property in LifecycleRule +type Transition struct { + XMLName xml.Name `xml:"Transition"` + Date time.Time `xml:"Date,omitempty"` + Days int `xml:"Days,omitempty"` + StorageClass StorageClassType `xml:"StorageClass"` +} + +// Expiration defines expiration property in LifecycleRule +type Expiration struct { + XMLName xml.Name `xml:"Expiration"` + Date time.Time `xml:"Date,omitempty"` + Days int `xml:"Days,omitempty"` +} + +// NoncurrentVersionTransition defines noncurrentVersion transition property in LifecycleRule +type NoncurrentVersionTransition struct { + XMLName xml.Name `xml:"NoncurrentVersionTransition"` + NoncurrentDays int `xml:"NoncurrentDays"` + StorageClass StorageClassType `xml:"StorageClass"` +} + +// NoncurrentVersionExpiration defines noncurrentVersion expiration property in LifecycleRule +type NoncurrentVersionExpiration struct { + XMLName xml.Name `xml:"NoncurrentVersionExpiration"` + NoncurrentDays int `xml:"NoncurrentDays"` +} + +// LifecycleRule defines lifecycle rule +type LifecycleRule struct { + ID string 
`xml:"ID,omitempty"` + Prefix string `xml:"Prefix"` + Status RuleStatusType `xml:"Status"` + Transitions []Transition `xml:"Transition,omitempty"` + Expiration Expiration `xml:"Expiration,omitempty"` + NoncurrentVersionTransitions []NoncurrentVersionTransition `xml:"NoncurrentVersionTransition,omitempty"` + NoncurrentVersionExpiration NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty"` +} + +// BucketLifecyleConfiguration defines the bucket lifecycle configuration +type BucketLifecyleConfiguration struct { + XMLName xml.Name `xml:"LifecycleConfiguration"` + LifecycleRules []LifecycleRule `xml:"Rule"` +} + +// SetBucketLifecycleConfigurationInput is the input parameter of SetBucketLifecycleConfiguration function +type SetBucketLifecycleConfigurationInput struct { + Bucket string `xml:"-"` + BucketLifecyleConfiguration +} + +// GetBucketLifecycleConfigurationOutput is the result of GetBucketLifecycleConfiguration function +type GetBucketLifecycleConfigurationOutput struct { + BaseModel + BucketLifecyleConfiguration +} + +// Tag defines tag property in BucketTagging +type Tag struct { + XMLName xml.Name `xml:"Tag"` + Key string `xml:"Key"` + Value string `xml:"Value"` +} + +// BucketTagging defines the bucket tag configuration +type BucketTagging struct { + XMLName xml.Name `xml:"Tagging"` + Tags []Tag `xml:"TagSet>Tag"` +} + +// SetBucketTaggingInput is the input parameter of SetBucketTagging function +type SetBucketTaggingInput struct { + Bucket string `xml:"-"` + BucketTagging +} + +// GetBucketTaggingOutput is the result of GetBucketTagging function +type GetBucketTaggingOutput struct { + BaseModel + BucketTagging +} + +// FilterRule defines filter rule in TopicConfiguration +type FilterRule struct { + XMLName xml.Name `xml:"FilterRule"` + Name string `xml:"Name,omitempty"` + Value string `xml:"Value,omitempty"` +} + +// TopicConfiguration defines the topic configuration +type TopicConfiguration struct { + XMLName xml.Name `xml:"TopicConfiguration"` + ID string `xml:"Id,omitempty"` + Topic string `xml:"Topic"` + Events []EventType `xml:"Event"` + FilterRules []FilterRule `xml:"Filter>Object>FilterRule"` +} + +// BucketNotification defines the bucket notification configuration +type BucketNotification struct { + XMLName xml.Name `xml:"NotificationConfiguration"` + TopicConfigurations []TopicConfiguration `xml:"TopicConfiguration"` +} + +// SetBucketNotificationInput is the input parameter of SetBucketNotification function +type SetBucketNotificationInput struct { + Bucket string `xml:"-"` + BucketNotification +} + +type topicConfigurationS3 struct { + XMLName xml.Name `xml:"TopicConfiguration"` + ID string `xml:"Id,omitempty"` + Topic string `xml:"Topic"` + Events []string `xml:"Event"` + FilterRules []FilterRule `xml:"Filter>S3Key>FilterRule"` +} + +type bucketNotificationS3 struct { + XMLName xml.Name `xml:"NotificationConfiguration"` + TopicConfigurations []topicConfigurationS3 `xml:"TopicConfiguration"` +} + +type getBucketNotificationOutputS3 struct { + BaseModel + bucketNotificationS3 +} + +// GetBucketNotificationOutput is the result of GetBucketNotification function +type GetBucketNotificationOutput struct { + BaseModel + BucketNotification +} + +// DeleteObjectInput is the input parameter of DeleteObject function +type DeleteObjectInput struct { + Bucket string + Key string + VersionId string +} + +// DeleteObjectOutput is the result of DeleteObject function +type DeleteObjectOutput struct { + BaseModel + VersionId string + DeleteMarker bool +} + +// 
ObjectToDelete defines the object property in DeleteObjectsInput +type ObjectToDelete struct { + XMLName xml.Name `xml:"Object"` + Key string `xml:"Key"` + VersionId string `xml:"VersionId,omitempty"` +} + +// DeleteObjectsInput is the input parameter of DeleteObjects function +type DeleteObjectsInput struct { + Bucket string `xml:"-"` + XMLName xml.Name `xml:"Delete"` + Quiet bool `xml:"Quiet,omitempty"` + Objects []ObjectToDelete `xml:"Object"` +} + +// Deleted defines the deleted property in DeleteObjectsOutput +type Deleted struct { + XMLName xml.Name `xml:"Deleted"` + Key string `xml:"Key"` + VersionId string `xml:"VersionId"` + DeleteMarker bool `xml:"DeleteMarker"` + DeleteMarkerVersionId string `xml:"DeleteMarkerVersionId"` +} + +// Error defines the error property in DeleteObjectsOutput +type Error struct { + XMLName xml.Name `xml:"Error"` + Key string `xml:"Key"` + VersionId string `xml:"VersionId"` + Code string `xml:"Code"` + Message string `xml:"Message"` +} + +// DeleteObjectsOutput is the result of DeleteObjects function +type DeleteObjectsOutput struct { + BaseModel + XMLName xml.Name `xml:"DeleteResult"` + Deleteds []Deleted `xml:"Deleted"` + Errors []Error `xml:"Error"` +} + +// SetObjectAclInput is the input parameter of SetObjectAcl function +type SetObjectAclInput struct { + Bucket string `xml:"-"` + Key string `xml:"-"` + VersionId string `xml:"-"` + ACL AclType `xml:"-"` + AccessControlPolicy +} + +// GetObjectAclInput is the input parameter of GetObjectAcl function +type GetObjectAclInput struct { + Bucket string + Key string + VersionId string +} + +// GetObjectAclOutput is the result of GetObjectAcl function +type GetObjectAclOutput struct { + BaseModel + VersionId string + AccessControlPolicy +} + +// RestoreObjectInput is the input parameter of RestoreObject function +type RestoreObjectInput struct { + Bucket string `xml:"-"` + Key string `xml:"-"` + VersionId string `xml:"-"` + XMLName xml.Name `xml:"RestoreRequest"` + Days int `xml:"Days"` + Tier RestoreTierType `xml:"GlacierJobParameters>Tier,omitempty"` +} + +// ISseHeader defines the sse encryption header +type ISseHeader interface { + GetEncryption() string + GetKey() string +} + +// SseKmsHeader defines the SseKms header +type SseKmsHeader struct { + Encryption string + Key string + isObs bool +} + +// SseCHeader defines the SseC header +type SseCHeader struct { + Encryption string + Key string + KeyMD5 string +} + +// GetObjectMetadataInput is the input parameter of GetObjectMetadata function +type GetObjectMetadataInput struct { + Bucket string + Key string + VersionId string + Origin string + RequestHeader string + SseHeader ISseHeader +} + +// GetObjectMetadataOutput is the result of GetObjectMetadata function +type GetObjectMetadataOutput struct { + BaseModel + VersionId string + WebsiteRedirectLocation string + Expiration string + Restore string + ObjectType string + NextAppendPosition string + StorageClass StorageClassType + ContentLength int64 + ContentType string + ETag string + AllowOrigin string + AllowHeader string + AllowMethod string + ExposeHeader string + MaxAgeSeconds int + LastModified time.Time + SseHeader ISseHeader + Metadata map[string]string +} + +// GetObjectInput is the input parameter of GetObject function +type GetObjectInput struct { + GetObjectMetadataInput + IfMatch string + IfNoneMatch string + IfUnmodifiedSince time.Time + IfModifiedSince time.Time + RangeStart int64 + RangeEnd int64 + ImageProcess string + ResponseCacheControl string + ResponseContentDisposition string + 
ResponseContentEncoding string + ResponseContentLanguage string + ResponseContentType string + ResponseExpires string +} + +// GetObjectOutput is the result of GetObject function +type GetObjectOutput struct { + GetObjectMetadataOutput + DeleteMarker bool + CacheControl string + ContentDisposition string + ContentEncoding string + ContentLanguage string + Expires string + Body io.ReadCloser +} + +// ObjectOperationInput defines the object operation properties +type ObjectOperationInput struct { + Bucket string + Key string + ACL AclType + GrantReadId string + GrantReadAcpId string + GrantWriteAcpId string + GrantFullControlId string + StorageClass StorageClassType + WebsiteRedirectLocation string + Expires int64 + SseHeader ISseHeader + Metadata map[string]string +} + +// PutObjectBasicInput defines the basic object operation properties +type PutObjectBasicInput struct { + ObjectOperationInput + ContentType string + ContentMD5 string + ContentLength int64 +} + +// PutObjectInput is the input parameter of PutObject function +type PutObjectInput struct { + PutObjectBasicInput + Body io.Reader +} + +// PutFileInput is the input parameter of PutFile function +type PutFileInput struct { + PutObjectBasicInput + SourceFile string +} + +// PutObjectOutput is the result of PutObject function +type PutObjectOutput struct { + BaseModel + VersionId string + SseHeader ISseHeader + StorageClass StorageClassType + ETag string +} + +// CopyObjectInput is the input parameter of CopyObject function +type CopyObjectInput struct { + ObjectOperationInput + CopySourceBucket string + CopySourceKey string + CopySourceVersionId string + CopySourceIfMatch string + CopySourceIfNoneMatch string + CopySourceIfUnmodifiedSince time.Time + CopySourceIfModifiedSince time.Time + SourceSseHeader ISseHeader + CacheControl string + ContentDisposition string + ContentEncoding string + ContentLanguage string + ContentType string + Expires string + MetadataDirective MetadataDirectiveType + SuccessActionRedirect string +} + +// CopyObjectOutput is the result of CopyObject function +type CopyObjectOutput struct { + BaseModel + CopySourceVersionId string `xml:"-"` + VersionId string `xml:"-"` + SseHeader ISseHeader `xml:"-"` + XMLName xml.Name `xml:"CopyObjectResult"` + LastModified time.Time `xml:"LastModified"` + ETag string `xml:"ETag"` +} + +// AbortMultipartUploadInput is the input parameter of AbortMultipartUpload function +type AbortMultipartUploadInput struct { + Bucket string + Key string + UploadId string +} + +// InitiateMultipartUploadInput is the input parameter of InitiateMultipartUpload function +type InitiateMultipartUploadInput struct { + ObjectOperationInput + ContentType string +} + +// InitiateMultipartUploadOutput is the result of InitiateMultipartUpload function +type InitiateMultipartUploadOutput struct { + BaseModel + XMLName xml.Name `xml:"InitiateMultipartUploadResult"` + Bucket string `xml:"Bucket"` + Key string `xml:"Key"` + UploadId string `xml:"UploadId"` + SseHeader ISseHeader +} + +// UploadPartInput is the input parameter of UploadPart function +type UploadPartInput struct { + Bucket string + Key string + PartNumber int + UploadId string + ContentMD5 string + SseHeader ISseHeader + Body io.Reader + SourceFile string + Offset int64 + PartSize int64 +} + +// UploadPartOutput is the result of UploadPart function +type UploadPartOutput struct { + BaseModel + PartNumber int + ETag string + SseHeader ISseHeader +} + +// Part defines the part properties +type Part struct { + XMLName xml.Name `xml:"Part"` + 
PartNumber int `xml:"PartNumber"` + ETag string `xml:"ETag"` + LastModified time.Time `xml:"LastModified,omitempty"` + Size int64 `xml:"Size,omitempty"` +} + +// CompleteMultipartUploadInput is the input parameter of CompleteMultipartUpload function +type CompleteMultipartUploadInput struct { + Bucket string `xml:"-"` + Key string `xml:"-"` + UploadId string `xml:"-"` + XMLName xml.Name `xml:"CompleteMultipartUpload"` + Parts []Part `xml:"Part"` +} + +// CompleteMultipartUploadOutput is the result of CompleteMultipartUpload function +type CompleteMultipartUploadOutput struct { + BaseModel + VersionId string `xml:"-"` + SseHeader ISseHeader `xml:"-"` + XMLName xml.Name `xml:"CompleteMultipartUploadResult"` + Location string `xml:"Location"` + Bucket string `xml:"Bucket"` + Key string `xml:"Key"` + ETag string `xml:"ETag"` +} + +// ListPartsInput is the input parameter of ListParts function +type ListPartsInput struct { + Bucket string + Key string + UploadId string + MaxParts int + PartNumberMarker int +} + +// ListPartsOutput is the result of ListParts function +type ListPartsOutput struct { + BaseModel + XMLName xml.Name `xml:"ListPartsResult"` + Bucket string `xml:"Bucket"` + Key string `xml:"Key"` + UploadId string `xml:"UploadId"` + PartNumberMarker int `xml:"PartNumberMarker"` + NextPartNumberMarker int `xml:"NextPartNumberMarker"` + MaxParts int `xml:"MaxParts"` + IsTruncated bool `xml:"IsTruncated"` + StorageClass StorageClassType `xml:"StorageClass"` + Initiator Initiator `xml:"Initiator"` + Owner Owner `xml:"Owner"` + Parts []Part `xml:"Part"` +} + +// CopyPartInput is the input parameter of CopyPart function +type CopyPartInput struct { + Bucket string + Key string + UploadId string + PartNumber int + CopySourceBucket string + CopySourceKey string + CopySourceVersionId string + CopySourceRangeStart int64 + CopySourceRangeEnd int64 + SseHeader ISseHeader + SourceSseHeader ISseHeader +} + +// CopyPartOutput is the result of CopyPart function +type CopyPartOutput struct { + BaseModel + XMLName xml.Name `xml:"CopyPartResult"` + PartNumber int `xml:"-"` + ETag string `xml:"ETag"` + LastModified time.Time `xml:"LastModified"` + SseHeader ISseHeader `xml:"-"` +} + +// CreateSignedUrlInput is the input parameter of CreateSignedUrl function +type CreateSignedUrlInput struct { + Method HttpMethodType + Bucket string + Key string + SubResource SubResourceType + Expires int + Headers map[string]string + QueryParams map[string]string +} + +// CreateSignedUrlOutput is the result of CreateSignedUrl function +type CreateSignedUrlOutput struct { + SignedUrl string + ActualSignedRequestHeaders http.Header +} + +// CreateBrowserBasedSignatureInput is the input parameter of CreateBrowserBasedSignature function. +type CreateBrowserBasedSignatureInput struct { + Bucket string + Key string + Expires int + FormParams map[string]string +} + +// CreateBrowserBasedSignatureOutput is the result of CreateBrowserBasedSignature function. 
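+// Its Policy/Signature values are typically embedded as form fields in a browser-based (form POST) upload.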
+type CreateBrowserBasedSignatureOutput struct { + OriginPolicy string + Policy string + Algorithm string + Credential string + Date string + Signature string +} + +// HeadObjectInput is the input parameter of HeadObject function +type HeadObjectInput struct { + Bucket string + Key string + VersionId string +} + +// BucketPayer defines the request payment configuration +type BucketPayer struct { + XMLName xml.Name `xml:"RequestPaymentConfiguration"` + Payer PayerType `xml:"Payer"` +} + +// SetBucketRequestPaymentInput is the input parameter of SetBucketRequestPayment function +type SetBucketRequestPaymentInput struct { + Bucket string `xml:"-"` + BucketPayer +} + +// GetBucketRequestPaymentOutput is the result of GetBucketRequestPayment function +type GetBucketRequestPaymentOutput struct { + BaseModel + BucketPayer +} + +// UploadFileInput is the input parameter of UploadFile function +type UploadFileInput struct { + ObjectOperationInput + ContentType string + UploadFile string + PartSize int64 + TaskNum int + EnableCheckpoint bool + CheckpointFile string +} + +// DownloadFileInput is the input parameter of DownloadFile function +type DownloadFileInput struct { + GetObjectMetadataInput + IfMatch string + IfNoneMatch string + IfModifiedSince time.Time + IfUnmodifiedSince time.Time + DownloadFile string + PartSize int64 + TaskNum int + EnableCheckpoint bool + CheckpointFile string +} + +// SetBucketFetchPolicyInput is the input parameter of SetBucketFetchPolicy function +type SetBucketFetchPolicyInput struct { + Bucket string + Status FetchPolicyStatusType `json:"status"` + Agency string `json:"agency"` +} + +// GetBucketFetchPolicyInput is the input parameter of GetBucketFetchPolicy function +type GetBucketFetchPolicyInput struct { + Bucket string +} + +// GetBucketFetchPolicyOutput is the result of GetBucketFetchPolicy function +type GetBucketFetchPolicyOutput struct { + BaseModel + FetchResponse `json:"fetch"` +} + +// FetchResponse defines the response fetch policy configuration +type FetchResponse struct { + Status FetchPolicyStatusType `json:"status"` + Agency string `json:"agency"` +} + +// DeleteBucketFetchPolicyInput is the input parameter of DeleteBucketFetchPolicy function +type DeleteBucketFetchPolicyInput struct { + Bucket string +} + +// SetBucketFetchJobInput is the input parameter of SetBucketFetchJob function +type SetBucketFetchJobInput struct { + Bucket string `json:"bucket"` + URL string `json:"url"` + Host string `json:"host,omitempty"` + Key string `json:"key,omitempty"` + Md5 string `json:"md5,omitempty"` + CallBackURL string `json:"callbackurl,omitempty"` + CallBackBody string `json:"callbackbody,omitempty"` + CallBackBodyType string `json:"callbackbodytype,omitempty"` + CallBackHost string `json:"callbackhost,omitempty"` + FileType string `json:"file_type,omitempty"` + IgnoreSameKey bool `json:"ignore_same_key,omitempty"` + ObjectHeaders map[string]string `json:"objectheaders,omitempty"` + Etag string `json:"etag,omitempty"` + TrustName string `json:"trustname,omitempty"` +} + +// SetBucketFetchJobOutput is the result of SetBucketFetchJob function +type SetBucketFetchJobOutput struct { + BaseModel + SetBucketFetchJobResponse +} + +// SetBucketFetchJobResponse defines the response SetBucketFetchJob configuration +type SetBucketFetchJobResponse struct { + ID string `json:"id"` + Wait int `json:"Wait"` +} + +// GetBucketFetchJobInput is the input parameter of GetBucketFetchJob function +type GetBucketFetchJobInput struct { + Bucket string + JobID string +} + +// 
GetBucketFetchJobOutput is the result of GetBucketFetchJob function +type GetBucketFetchJobOutput struct { + BaseModel + GetBucketFetchJobResponse +} + +// GetBucketFetchJobResponse defines the response fetch job configuration +type GetBucketFetchJobResponse struct { + Err string `json:"err"` + Code string `json:"code"` + Status string `json:"status"` + Job JobResponse `json:"job"` +} + +// JobResponse defines the response job configuration +type JobResponse struct { + Bucket string `json:"bucket"` + URL string `json:"url"` + Host string `json:"host"` + Key string `json:"key"` + Md5 string `json:"md5"` + CallBackURL string `json:"callbackurl"` + CallBackBody string `json:"callbackbody"` + CallBackBodyType string `json:"callbackbodytype"` + CallBackHost string `json:"callbackhost"` + FileType string `json:"file_type"` + IgnoreSameKey bool `json:"ignore_same_key"` +} diff --git a/modules/obs/pool.go b/modules/obs/pool.go new file mode 100755 index 000000000..4596f0a16 --- /dev/null +++ b/modules/obs/pool.go @@ -0,0 +1,543 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +//nolint:structcheck, unused +//nolint:golint, unused +package obs + +import ( + "errors" + "fmt" + "runtime" + "sync" + "sync/atomic" + "time" +) + +// Future defines interface with function: Get +type Future interface { + Get() interface{} +} + +// FutureResult for task result +type FutureResult struct { + result interface{} + resultChan chan interface{} + lock sync.Mutex +} + +type panicResult struct { + presult interface{} +} + +func (f *FutureResult) checkPanic() interface{} { + if r, ok := f.result.(panicResult); ok { + panic(r.presult) + } + return f.result +} + +// Get gets the task result +func (f *FutureResult) Get() interface{} { + if f.resultChan == nil { + return f.checkPanic() + } + f.lock.Lock() + defer f.lock.Unlock() + if f.resultChan == nil { + return f.checkPanic() + } + + f.result = <-f.resultChan + close(f.resultChan) + f.resultChan = nil + return f.checkPanic() +} + +// Task defines interface with function: Run +type Task interface { + Run() interface{} +} + +type funcWrapper struct { + f func() interface{} +} + +func (fw *funcWrapper) Run() interface{} { + if fw.f != nil { + return fw.f() + } + return nil +} + +type taskWrapper struct { + t Task + f *FutureResult +} + +func (tw *taskWrapper) Run() interface{} { + if tw.t != nil { + return tw.t.Run() + } + return nil +} + +type signalTask struct { + id string +} + +func (signalTask) Run() interface{} { + return nil +} + +type worker struct { + name string + taskQueue chan Task + wg *sync.WaitGroup + pool *RoutinePool +} + +func runTask(t Task) { + if tw, ok := t.(*taskWrapper); ok { + defer func() { + if r := recover(); r != nil { + tw.f.resultChan <- panicResult{ + presult: r, + } + } + }() + ret := t.Run() + tw.f.resultChan <- ret + } else { + t.Run() + } +} + +func (*worker) runTask(t Task) { + runTask(t) +} + +func (w *worker) start() { + go func() { + defer func() { + if w.wg != nil { + 
w.wg.Done()
+			}
+		}()
+		for {
+			task, ok := <-w.taskQueue
+			if !ok {
+				break
+			}
+			w.pool.AddCurrentWorkingCnt(1)
+			w.runTask(task)
+			w.pool.AddCurrentWorkingCnt(-1)
+			if w.pool.autoTuneWorker(w) {
+				break
+			}
+		}
+	}()
+}
+
+func (w *worker) release() {
+	w.taskQueue = nil
+	w.wg = nil
+	w.pool = nil
+}
+
+// Pool defines coroutine pool interface
+type Pool interface {
+	ShutDown()
+	Submit(t Task) (Future, error)
+	SubmitFunc(f func() interface{}) (Future, error)
+	Execute(t Task)
+	ExecuteFunc(f func() interface{})
+	GetMaxWorkerCnt() int64
+	AddMaxWorkerCnt(value int64) int64
+	GetCurrentWorkingCnt() int64
+	AddCurrentWorkingCnt(value int64) int64
+	GetWorkerCnt() int64
+	AddWorkerCnt(value int64) int64
+	EnableAutoTune()
+}
+
+type basicPool struct {
+	maxWorkerCnt      int64
+	workerCnt         int64
+	currentWorkingCnt int64
+	isShutDown        int32
+}
+
+// ErrTaskInvalid will be returned if the task is nil
+var ErrTaskInvalid = errors.New("Task is nil")
+
+func (pool *basicPool) GetCurrentWorkingCnt() int64 {
+	return atomic.LoadInt64(&pool.currentWorkingCnt)
+}
+
+func (pool *basicPool) AddCurrentWorkingCnt(value int64) int64 {
+	return atomic.AddInt64(&pool.currentWorkingCnt, value)
+}
+
+func (pool *basicPool) GetWorkerCnt() int64 {
+	return atomic.LoadInt64(&pool.workerCnt)
+}
+
+func (pool *basicPool) AddWorkerCnt(value int64) int64 {
+	return atomic.AddInt64(&pool.workerCnt, value)
+}
+
+func (pool *basicPool) GetMaxWorkerCnt() int64 {
+	return atomic.LoadInt64(&pool.maxWorkerCnt)
+}
+
+func (pool *basicPool) AddMaxWorkerCnt(value int64) int64 {
+	return atomic.AddInt64(&pool.maxWorkerCnt, value)
+}
+
+func (pool *basicPool) CompareAndSwapCurrentWorkingCnt(oldValue, newValue int64) bool {
+	return atomic.CompareAndSwapInt64(&pool.currentWorkingCnt, oldValue, newValue)
+}
+
+func (pool *basicPool) EnableAutoTune() {
+
+}
+
+// RoutinePool defines the coroutine pool struct
+type RoutinePool struct {
+	basicPool
+	taskQueue     chan Task
+	dispatchQueue chan Task
+	workers       map[string]*worker
+	cacheCnt      int
+	wg            *sync.WaitGroup
+	lock          *sync.Mutex
+	shutDownWg    *sync.WaitGroup
+	autoTune      int32
+}
+
+// ErrSubmitTimeout will be returned if submit task timeout when calling SubmitWithTimeout function
+var ErrSubmitTimeout = errors.New("Submit task timeout")
+
+// ErrPoolShutDown will be returned if RoutinePool is shutdown
+var ErrPoolShutDown = errors.New("RoutinePool is shutdown")
+
+// ErrTaskReject will be returned if submit task is rejected
+var ErrTaskReject = errors.New("Submit task is rejected")
+
+var closeQueue = signalTask{id: "closeQueue"}
+
+// NewRoutinePool creates a RoutinePool instance
+func NewRoutinePool(maxWorkerCnt, cacheCnt int) Pool {
+	if maxWorkerCnt <= 0 {
+		maxWorkerCnt = runtime.NumCPU()
+	}
+
+	pool := &RoutinePool{
+		cacheCnt:   cacheCnt,
+		wg:         new(sync.WaitGroup),
+		lock:       new(sync.Mutex),
+		shutDownWg: new(sync.WaitGroup),
+		autoTune:   0,
+	}
+	pool.isShutDown = 0
+	pool.maxWorkerCnt += int64(maxWorkerCnt)
+	if pool.cacheCnt <= 0 {
+		pool.taskQueue = make(chan Task)
+	} else {
+		pool.taskQueue = make(chan Task, pool.cacheCnt)
+	}
+	pool.workers = make(map[string]*worker, pool.maxWorkerCnt)
+	// dispatchQueue must be unbuffered, so Execute/Submit block until the dispatcher takes the task
+	pool.dispatchQueue = make(chan Task)
+	pool.dispatcher()
+
+	return pool
+}
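+
+// A minimal usage sketch of the pool (illustrative only, not part of the
+// original change; the worker count of 4 and queue size of 16 are arbitrary):
+//
+//	pool := NewRoutinePool(4, 16)
+//	future, err := pool.SubmitFunc(func() interface{} { return 1 + 1 })
+//	if err != nil {
+//		// the pool is shut down (ErrPoolShutDown) or the task was nil
+//	}
+//	fmt.Println(future.Get()) // Get blocks until a worker has run the task; prints 2
+//	pool.ShutDown()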
+
+// EnableAutoTune sets the autoTune enabled
+func (pool *RoutinePool) EnableAutoTune() {
+	atomic.StoreInt32(&pool.autoTune, 1)
+}
+
+func (pool *RoutinePool) checkStatus(t Task) error {
+	if t == nil {
+		return ErrTaskInvalid
+	}
+
+	if atomic.LoadInt32(&pool.isShutDown) == 1 {
+		return ErrPoolShutDown
+	}
+	return nil
+}
+
+func (pool *RoutinePool) dispatcher() {
+	pool.shutDownWg.Add(1)
+	go func() {
+		for {
+			task, ok := <-pool.dispatchQueue
+			if !ok {
+				break
+			}
+
+			if task == closeQueue {
+				close(pool.taskQueue)
+				pool.shutDownWg.Done()
+				continue
+			}
+
+			if pool.GetWorkerCnt() < pool.GetMaxWorkerCnt() {
+				pool.addWorker()
+			}
+
+			pool.taskQueue <- task
+		}
+	}()
+}
+
+// AddMaxWorkerCnt sets the maxWorkerCnt field's value and returns it
+func (pool *RoutinePool) AddMaxWorkerCnt(value int64) int64 {
+	if atomic.LoadInt32(&pool.autoTune) == 1 {
+		return pool.basicPool.AddMaxWorkerCnt(value)
+	}
+	return pool.GetMaxWorkerCnt()
+}
+
+func (pool *RoutinePool) addWorker() {
+	if atomic.LoadInt32(&pool.autoTune) == 1 {
+		pool.lock.Lock()
+		defer pool.lock.Unlock()
+	}
+	w := &worker{}
+	w.name = fmt.Sprintf("worker-%d", len(pool.workers))
+	w.taskQueue = pool.taskQueue
+	w.wg = pool.wg
+	pool.AddWorkerCnt(1)
+	w.pool = pool
+	pool.workers[w.name] = w
+	pool.wg.Add(1)
+	w.start()
+}
+
+func (pool *RoutinePool) autoTuneWorker(w *worker) bool {
+	if atomic.LoadInt32(&pool.autoTune) == 0 {
+		return false
+	}
+
+	if w == nil {
+		return false
+	}
+
+	workerCnt := pool.GetWorkerCnt()
+	maxWorkerCnt := pool.GetMaxWorkerCnt()
+	if workerCnt > maxWorkerCnt && atomic.CompareAndSwapInt64(&pool.workerCnt, workerCnt, workerCnt-1) {
+		pool.lock.Lock()
+		defer pool.lock.Unlock()
+		delete(pool.workers, w.name)
+		w.wg.Done()
+		w.release()
+		return true
+	}
+
+	return false
+}
+
+// ExecuteFunc creates a funcWrapper instance with the specified function and calls the Execute function
+func (pool *RoutinePool) ExecuteFunc(f func() interface{}) {
+	fw := &funcWrapper{
+		f: f,
+	}
+	pool.Execute(fw)
+}
+
+// Execute pushes the specified task to the dispatchQueue
+func (pool *RoutinePool) Execute(t Task) {
+	if t != nil {
+		pool.dispatchQueue <- t
+	}
+}
+
+// SubmitFunc creates a funcWrapper instance with the specified function and calls the Submit function
+func (pool *RoutinePool) SubmitFunc(f func() interface{}) (Future, error) {
+	fw := &funcWrapper{
+		f: f,
+	}
+	return pool.Submit(fw)
+}
+
+// Submit pushes the specified task to the dispatchQueue, and returns the FutureResult and error info
+func (pool *RoutinePool) Submit(t Task) (Future, error) {
+	if err := pool.checkStatus(t); err != nil {
+		return nil, err
+	}
+	f := &FutureResult{}
+	f.resultChan = make(chan interface{}, 1)
+	tw := &taskWrapper{
+		t: t,
+		f: f,
+	}
+	pool.dispatchQueue <- tw
+	return f, nil
+}
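+
+// A panic inside a submitted task does not kill the worker goroutine:
+// runTask recovers it into a panicResult, and FutureResult.Get re-raises
+// it on the goroutine that calls Get. Callers that must survive such
+// panics should wrap Get accordingly (illustrative sketch, not part of
+// the original change):
+//
+//	future, _ := pool.SubmitFunc(func() interface{} { panic("boom") })
+//	func() {
+//		defer func() {
+//			if r := recover(); r != nil {
+//				fmt.Println("task panicked:", r) // prints: task panicked: boom
+//			}
+//		}()
+//		future.Get()
+//	}()
+
+// SubmitWithTimeout pushes the specified task to the dispatchQueue, and returns the FutureResult and error info.
+// It also takes a timeout in milliseconds and returns ErrSubmitTimeout if the task cannot be enqueued within that time.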
+func (pool *RoutinePool) SubmitWithTimeout(t Task, timeout int64) (Future, error) {
+	if timeout <= 0 {
+		return pool.Submit(t)
+	}
+	if err := pool.checkStatus(t); err != nil {
+		return nil, err
+	}
+	timeoutChan := make(chan bool, 1)
+	go func() {
+		time.Sleep(time.Millisecond * time.Duration(timeout))
+		timeoutChan <- true
+		close(timeoutChan)
+	}()
+
+	f := &FutureResult{}
+	f.resultChan = make(chan interface{}, 1)
+	tw := &taskWrapper{
+		t: t,
+		f: f,
+	}
+	select {
+	case pool.dispatchQueue <- tw:
+		return f, nil
+	case <-timeoutChan:
+		return nil, ErrSubmitTimeout
+	}
+}
+
+func (pool *RoutinePool) beforeCloseDispatchQueue() {
+	if !atomic.CompareAndSwapInt32(&pool.isShutDown, 0, 1) {
+		return
+	}
+	pool.dispatchQueue <- closeQueue
+	pool.wg.Wait()
+}
+
+func (pool *RoutinePool) doCloseDispatchQueue() {
+	close(pool.dispatchQueue)
+	pool.shutDownWg.Wait()
+}
+
+// ShutDown closes the RoutinePool instance
+func (pool *RoutinePool) ShutDown() {
+	pool.beforeCloseDispatchQueue()
+	pool.doCloseDispatchQueue()
+	for _, w := range pool.workers {
+		w.release()
+	}
+	pool.workers = nil
+	pool.taskQueue = nil
+	pool.dispatchQueue = nil
+}
+
+// NoChanPool defines the coroutine pool struct
+type NoChanPool struct {
+	basicPool
+	wg     *sync.WaitGroup
+	tokens chan interface{}
+}
+
+// NewNochanPool creates a new NoChanPool instance
+func NewNochanPool(maxWorkerCnt int) Pool {
+	if maxWorkerCnt <= 0 {
+		maxWorkerCnt = runtime.NumCPU()
+	}
+
+	pool := &NoChanPool{
+		wg:     new(sync.WaitGroup),
+		tokens: make(chan interface{}, maxWorkerCnt),
+	}
+	pool.isShutDown = 0
+	pool.AddMaxWorkerCnt(int64(maxWorkerCnt))
+
+	for i := 0; i < maxWorkerCnt; i++ {
+		pool.tokens <- struct{}{}
+	}
+
+	return pool
+}
+
+func (pool *NoChanPool) acquire() {
+	<-pool.tokens
+}
+
+func (pool *NoChanPool) release() {
+	pool.tokens <- struct{}{}
+}
+
+func (pool *NoChanPool) execute(t Task) {
+	pool.wg.Add(1)
+	go func() {
+		pool.acquire()
+		defer func() {
+			pool.release()
+			pool.wg.Done()
+		}()
+		runTask(t)
+	}()
+}
+
+// ShutDown closes the NoChanPool instance
+func (pool *NoChanPool) ShutDown() {
+	if !atomic.CompareAndSwapInt32(&pool.isShutDown, 0, 1) {
+		return
+	}
+	pool.wg.Wait()
+}
+
+// Execute executes the specified task
+func (pool *NoChanPool) Execute(t Task) {
+	if t != nil {
+		pool.execute(t)
+	}
+}
+
+// ExecuteFunc creates a funcWrapper instance with the specified function and calls the Execute function
+func (pool *NoChanPool) ExecuteFunc(f func() interface{}) {
+	fw := &funcWrapper{
+		f: f,
+	}
+	pool.Execute(fw)
+}
+
+// Submit executes the specified task, and returns the FutureResult and error info
+func (pool *NoChanPool) Submit(t Task) (Future, error) {
+	if t == nil {
+		return nil, ErrTaskInvalid
+	}
+
+	f := &FutureResult{}
+	f.resultChan = make(chan interface{}, 1)
+	tw := &taskWrapper{
+		t: t,
+		f: f,
+	}
+
+	pool.execute(tw)
+	return f, nil
+}
+
+// SubmitFunc creates a funcWrapper instance with the specified function and calls the Submit function
+func (pool *NoChanPool) SubmitFunc(f func() interface{}) (Future, error) {
+	fw := &funcWrapper{
+		f: f,
+	}
+	return pool.Submit(fw)
+}
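+
+// Choosing between the two pools (a usage note, not part of the original
+// change): RoutinePool keeps a bounded set of long-lived workers fed through
+// a dispatcher, which suits many small tasks; NoChanPool spawns one goroutine
+// per task and only caps concurrency with the token channel, which suits a
+// handful of larger tasks where per-task goroutine overhead is acceptable.
+// Sketch (the count 8 is arbitrary):
+//
+//	pool := NewNochanPool(8)
+//	for i := 0; i < 100; i++ {
+//		pool.ExecuteFunc(func() interface{} {
+//			return nil // at most 8 of these run at once
+//		})
+//	}
+//	pool.ShutDown() // waits for all submitted tasks to finish
diff --git a/modules/obs/temporary.go b/modules/obs/temporary.go
new file mode 100755
index 000000000..dfb87ffc6
--- /dev/null
+++ b/modules/obs/temporary.go
@@ -0,0 +1,895 @@
+// Copyright 2019 Huawei Technologies Co.,Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the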
+// License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+//nolint:golint, unused
+package obs
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/unknwon/com"
+)
+
+// CreateSignedUrl creates a signed url with the specified CreateSignedUrlInput, and returns the CreateSignedUrlOutput and error
+func (obsClient ObsClient) CreateSignedUrl(input *CreateSignedUrlInput) (output *CreateSignedUrlOutput, err error) {
+	if input == nil {
+		return nil, errors.New("CreateSignedUrlInput is nil")
+	}
+
+	params := make(map[string]string, len(input.QueryParams))
+	for key, value := range input.QueryParams {
+		params[key] = value
+	}
+
+	if input.SubResource != "" {
+		params[string(input.SubResource)] = ""
+	}
+
+	headers := make(map[string][]string, len(input.Headers))
+	for key, value := range input.Headers {
+		headers[key] = []string{value}
+	}
+
+	if input.Expires <= 0 {
+		input.Expires = 300
+	}
+
+	requestURL, err := obsClient.doAuthTemporary(string(input.Method), input.Bucket, input.Key, params, headers, int64(input.Expires))
+	if err != nil {
+		return nil, err
+	}
+
+	output = &CreateSignedUrlOutput{
+		SignedUrl:                  requestURL,
+		ActualSignedRequestHeaders: headers,
+	}
+	return
+}
+
+// isSecurityToken copies the security token, if one is configured, into params
+// under the header key matching the configured signature variant
+func (obsClient ObsClient) isSecurityToken(params map[string]string) {
+	if obsClient.conf.securityProvider.securityToken != "" {
+		if obsClient.conf.signature == SignatureObs {
+			params[HEADER_STS_TOKEN_OBS] = obsClient.conf.securityProvider.securityToken
+		} else {
+			params[HEADER_STS_TOKEN_AMZ] = obsClient.conf.securityProvider.securityToken
+		}
+	}
+}
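+
+// A typical pattern pairs CreateSignedUrl with one of the *WithSignedUrl
+// helpers below. A sketch for a temporary download link (illustrative only,
+// not part of the original change; bucket and key are placeholders,
+// HttpMethodGet is the SDK's GET method constant, Expires is in seconds):
+//
+//	signed, err := client.CreateSignedUrl(&CreateSignedUrlInput{
+//		Method:  HttpMethodGet,
+//		Bucket:  "my-bucket",
+//		Key:     "attachment/foo.zip",
+//		Expires: 300,
+//	})
+//	if err == nil {
+//		out, err := client.GetObjectWithSignedUrl(signed.SignedUrl, signed.ActualSignedRequestHeaders)
+//		// when err is nil, out.Body streams the object; the caller must close it
+//	}
+
+// CreateBrowserBasedSignature gets the browser based signature with the specified CreateBrowserBasedSignatureInput,
+// and returns the CreateBrowserBasedSignatureOutput and error
+func (obsClient ObsClient) CreateBrowserBasedSignature(input *CreateBrowserBasedSignatureInput) (output *CreateBrowserBasedSignatureOutput, err error) {
+	if input == nil {
+		return nil, errors.New("CreateBrowserBasedSignatureInput is nil")
+	}
+
+	params := make(map[string]string, len(input.FormParams))
+	for key, value := range input.FormParams {
+		params[key] = value
+	}
+
+	date := time.Now().UTC()
+	shortDate := date.Format(SHORT_DATE_FORMAT)
+	longDate := date.Format(LONG_DATE_FORMAT)
+
+	credential, _ := getCredential(obsClient.conf.securityProvider.ak, obsClient.conf.region, shortDate)
+
+	if input.Expires <= 0 {
+		input.Expires = 300
+	}
+
+	expiration := date.Add(time.Second * time.Duration(input.Expires)).Format(ISO8601_DATE_FORMAT)
+	if obsClient.conf.signature == SignatureV4 {
+		params[PARAM_ALGORITHM_AMZ_CAMEL] = V4_HASH_PREFIX
+		params[PARAM_CREDENTIAL_AMZ_CAMEL] = credential
+		params[PARAM_DATE_AMZ_CAMEL] = longDate
+	}
+
+	obsClient.isSecurityToken(params)
+
+	matchAnyBucket := true
+	matchAnyKey := true
+	count := 5
+	if bucket := strings.TrimSpace(input.Bucket); bucket != "" {
+		params["bucket"] = bucket
+		matchAnyBucket = false
+		count--
+	}
+
+	if key := strings.TrimSpace(input.Key); key != "" {
+		params["key"] = key
+		matchAnyKey = false
+		count--
+	}
+
+	originPolicySlice := make([]string, 0, len(params)+count)
+	originPolicySlice = append(originPolicySlice,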
fmt.Sprintf("{\"expiration\":\"%s\",", expiration)) + originPolicySlice = append(originPolicySlice, "\"conditions\":[") + for key, value := range params { + if _key := strings.TrimSpace(strings.ToLower(key)); _key != "" { + originPolicySlice = append(originPolicySlice, fmt.Sprintf("{\"%s\":\"%s\"},", _key, value)) + } + } + + if matchAnyBucket { + originPolicySlice = append(originPolicySlice, "[\"starts-with\", \"$bucket\", \"\"],") + } + + if matchAnyKey { + originPolicySlice = append(originPolicySlice, "[\"starts-with\", \"$key\", \"\"],") + } + + originPolicySlice = append(originPolicySlice, "]}") + + originPolicy := strings.Join(originPolicySlice, "") + policy := Base64Encode([]byte(originPolicy)) + var signature string + if obsClient.conf.signature == SignatureV4 { + signature = getSignature(policy, obsClient.conf.securityProvider.sk, obsClient.conf.region, shortDate) + } else { + signature = Base64Encode(HmacSha1([]byte(obsClient.conf.securityProvider.sk), []byte(policy))) + } + + output = &CreateBrowserBasedSignatureOutput{ + OriginPolicy: originPolicy, + Policy: policy, + Algorithm: params[PARAM_ALGORITHM_AMZ_CAMEL], + Credential: params[PARAM_CREDENTIAL_AMZ_CAMEL], + Date: params[PARAM_DATE_AMZ_CAMEL], + Signature: signature, + } + return +} + +// ListBucketsWithSignedUrl lists buckets with the specified signed url and signed request headers +func (obsClient ObsClient) ListBucketsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListBucketsOutput, err error) { + output = &ListBucketsOutput{} + err = obsClient.doHTTPWithSignedURL("ListBuckets", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// CreateBucketWithSignedUrl creates bucket with the specified signed url and signed request headers and data +func (obsClient ObsClient) CreateBucketWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("CreateBucket", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// DeleteBucketWithSignedUrl deletes bucket with the specified signed url and signed request headers +func (obsClient ObsClient) DeleteBucketWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("DeleteBucket", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// SetBucketStoragePolicyWithSignedUrl sets bucket storage class with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketStoragePolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetBucketStoragePolicy", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketStoragePolicyWithSignedUrl gets bucket storage class with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketStoragePolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketStoragePolicyOutput, err error) { + output = &GetBucketStoragePolicyOutput{} + err = 
obsClient.doHTTPWithSignedURL("GetBucketStoragePolicy", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// ListObjectsWithSignedUrl lists objects in a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) ListObjectsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListObjectsOutput, err error) { + output = &ListObjectsOutput{} + err = obsClient.doHTTPWithSignedURL("ListObjects", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } else { + if location, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok { + output.Location = location[0] + } + } + return +} + +// ListVersionsWithSignedUrl lists versioning objects in a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) ListVersionsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListVersionsOutput, err error) { + output = &ListVersionsOutput{} + err = obsClient.doHTTPWithSignedURL("ListVersions", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } else { + if location, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok { + output.Location = location[0] + } + } + return +} + +// ListMultipartUploadsWithSignedUrl lists the multipart uploads that are initialized but not combined or aborted in a +// specified bucket with the specified signed url and signed request headers +func (obsClient ObsClient) ListMultipartUploadsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListMultipartUploadsOutput, err error) { + output = &ListMultipartUploadsOutput{} + err = obsClient.doHTTPWithSignedURL("ListMultipartUploads", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// SetBucketQuotaWithSignedUrl sets the bucket quota with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketQuotaWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetBucketQuota", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketQuotaWithSignedUrl gets the bucket quota with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketQuotaWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketQuotaOutput, err error) { + output = &GetBucketQuotaOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketQuota", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// HeadBucketWithSignedUrl checks whether a bucket exists with the specified signed url and signed request headers +func (obsClient ObsClient) HeadBucketWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("HeadBucket", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// HeadObjectWithSignedUrl checks whether an object exists with the specified signed url and signed request headers +func (obsClient ObsClient) HeadObjectWithSignedUrl(signedUrl 
string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("HeadObject", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketMetadataWithSignedUrl gets the metadata of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketMetadataWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketMetadataOutput, err error) { + output = &GetBucketMetadataOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketMetadata", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } else { + ParseGetBucketMetadataOutput(output) + } + return +} + +// GetBucketStorageInfoWithSignedUrl gets storage information about a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketStorageInfoWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketStorageInfoOutput, err error) { + output = &GetBucketStorageInfoOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketStorageInfo", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketLocationWithSignedUrl gets the location of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketLocationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketLocationOutput, err error) { + output = &GetBucketLocationOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketLocation", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// SetBucketAclWithSignedUrl sets the bucket ACL with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetBucketAcl", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketAclWithSignedUrl gets the bucket ACL with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketAclOutput, err error) { + output = &GetBucketAclOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketAcl", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// SetBucketPolicyWithSignedUrl sets the bucket policy with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketPolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetBucketPolicy", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketPolicyWithSignedUrl gets the bucket policy with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketPolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketPolicyOutput, err 
error) { + output = &GetBucketPolicyOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketPolicy", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, false) + if err != nil { + output = nil + } + return +} + +// DeleteBucketPolicyWithSignedUrl deletes the bucket policy with the specified signed url and signed request headers +func (obsClient ObsClient) DeleteBucketPolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("DeleteBucketPolicy", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// SetBucketCorsWithSignedUrl sets CORS rules for a bucket with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketCorsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetBucketCors", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketCorsWithSignedUrl gets CORS rules of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketCorsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketCorsOutput, err error) { + output = &GetBucketCorsOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketCors", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// DeleteBucketCorsWithSignedUrl deletes CORS rules of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) DeleteBucketCorsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("DeleteBucketCors", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// SetBucketVersioningWithSignedUrl sets the versioning status for a bucket with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketVersioningWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetBucketVersioning", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketVersioningWithSignedUrl gets the versioning status of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketVersioningWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketVersioningOutput, err error) { + output = &GetBucketVersioningOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketVersioning", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// SetBucketWebsiteConfigurationWithSignedUrl sets website hosting for a bucket with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketWebsiteConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = 
obsClient.doHTTPWithSignedURL("SetBucketWebsiteConfiguration", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketWebsiteConfigurationWithSignedUrl gets the website hosting settings of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketWebsiteConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketWebsiteConfigurationOutput, err error) { + output = &GetBucketWebsiteConfigurationOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketWebsiteConfiguration", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// DeleteBucketWebsiteConfigurationWithSignedUrl deletes the website hosting settings of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) DeleteBucketWebsiteConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("DeleteBucketWebsiteConfiguration", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// SetBucketLoggingConfigurationWithSignedUrl sets the bucket logging with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketLoggingConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetBucketLoggingConfiguration", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketLoggingConfigurationWithSignedUrl gets the logging settings of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketLoggingConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketLoggingConfigurationOutput, err error) { + output = &GetBucketLoggingConfigurationOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketLoggingConfiguration", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// SetBucketLifecycleConfigurationWithSignedUrl sets lifecycle rules for a bucket with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketLifecycleConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetBucketLifecycleConfiguration", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketLifecycleConfigurationWithSignedUrl gets lifecycle rules of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketLifecycleConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketLifecycleConfigurationOutput, err error) { + output = &GetBucketLifecycleConfigurationOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketLifecycleConfiguration", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// 
DeleteBucketLifecycleConfigurationWithSignedUrl deletes lifecycle rules of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) DeleteBucketLifecycleConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("DeleteBucketLifecycleConfiguration", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// SetBucketTaggingWithSignedUrl sets bucket tags with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketTaggingWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetBucketTagging", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketTaggingWithSignedUrl gets bucket tags with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketTaggingWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketTaggingOutput, err error) { + output = &GetBucketTaggingOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketTagging", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// DeleteBucketTaggingWithSignedUrl deletes bucket tags with the specified signed url and signed request headers +func (obsClient ObsClient) DeleteBucketTaggingWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("DeleteBucketTagging", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// SetBucketNotificationWithSignedUrl sets event notification for a bucket with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketNotificationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetBucketNotification", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketNotificationWithSignedUrl gets event notification settings of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketNotificationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketNotificationOutput, err error) { + output = &GetBucketNotificationOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketNotification", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// DeleteObjectWithSignedUrl deletes an object with the specified signed url and signed request headers +func (obsClient ObsClient) DeleteObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *DeleteObjectOutput, err error) { + output = &DeleteObjectOutput{} + err = obsClient.doHTTPWithSignedURL("DeleteObject", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } else { + ParseDeleteObjectOutput(output) + } + return +} + +// 
DeleteObjectsWithSignedUrl deletes objects in a batch with the specified signed url and signed request headers and data +func (obsClient ObsClient) DeleteObjectsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *DeleteObjectsOutput, err error) { + output = &DeleteObjectsOutput{} + err = obsClient.doHTTPWithSignedURL("DeleteObjects", HTTP_POST, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// SetObjectAclWithSignedUrl sets ACL for an object with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetObjectAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetObjectAcl", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetObjectAclWithSignedUrl gets the ACL of an object with the specified signed url and signed request headers +func (obsClient ObsClient) GetObjectAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetObjectAclOutput, err error) { + output = &GetObjectAclOutput{} + err = obsClient.doHTTPWithSignedURL("GetObjectAcl", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } else { + if versionID, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok { + output.VersionId = versionID[0] + } + } + return +} + +// RestoreObjectWithSignedUrl restores an object with the specified signed url and signed request headers and data +func (obsClient ObsClient) RestoreObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("RestoreObject", HTTP_POST, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetObjectMetadataWithSignedUrl gets object metadata with the specified signed url and signed request headers +func (obsClient ObsClient) GetObjectMetadataWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetObjectMetadataOutput, err error) { + output = &GetObjectMetadataOutput{} + err = obsClient.doHTTPWithSignedURL("GetObjectMetadata", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } else { + ParseGetObjectMetadataOutput(output) + } + return +} + +// GetObjectWithSignedUrl downloads object with the specified signed url and signed request headers +func (obsClient ObsClient) GetObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetObjectOutput, err error) { + output = &GetObjectOutput{} + err = obsClient.doHTTPWithSignedURL("GetObject", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } else { + ParseGetObjectOutput(output) + } + return +} + +// PutObjectWithSignedUrl uploads an object to the specified bucket with the specified signed url and signed request headers and data +func (obsClient ObsClient) PutObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *PutObjectOutput, err error) { + output = &PutObjectOutput{} + err = obsClient.doHTTPWithSignedURL("PutObject", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + 
output = nil + } else { + ParsePutObjectOutput(output) + } + return +} + +// PutFileWithSignedUrl uploads a file to the specified bucket with the specified signed url and signed request headers and sourceFile path +func (obsClient ObsClient) PutFileWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, sourceFile string) (output *PutObjectOutput, err error) { + var data io.Reader + sourceFile = strings.TrimSpace(sourceFile) + if sourceFile != "" { + fd, _err := os.Open(sourceFile) + if _err != nil { + err = _err + return nil, err + } + defer func() { + errMsg := fd.Close() + if errMsg != nil { + doLog(LEVEL_WARN, "Failed to close file with reason: %v", errMsg) + } + }() + + stat, _err := fd.Stat() + if _err != nil { + err = _err + return nil, err + } + fileReaderWrapper := &fileReaderWrapper{filePath: sourceFile} + fileReaderWrapper.reader = fd + + var contentLength int64 + if value, ok := actualSignedRequestHeaders[HEADER_CONTENT_LENGTH_CAMEL]; ok { + contentLength = StringToInt64(value[0], -1) + } else if value, ok := actualSignedRequestHeaders[HEADER_CONTENT_LENGTH]; ok { + contentLength = StringToInt64(value[0], -1) + } else { + contentLength = stat.Size() + } + if contentLength > stat.Size() { + return nil, errors.New("ContentLength is larger than fileSize") + } + fileReaderWrapper.totalCount = contentLength + data = fileReaderWrapper + } + + output = &PutObjectOutput{} + err = obsClient.doHTTPWithSignedURL("PutObject", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } else { + ParsePutObjectOutput(output) + } + return +} + +// CopyObjectWithSignedUrl creates a copy for an existing object with the specified signed url and signed request headers +func (obsClient ObsClient) CopyObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *CopyObjectOutput, err error) { + output = &CopyObjectOutput{} + err = obsClient.doHTTPWithSignedURL("CopyObject", HTTP_PUT, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } else { + ParseCopyObjectOutput(output) + } + return +} + +// AbortMultipartUploadWithSignedUrl aborts a multipart upload in a specified bucket by using the multipart upload ID with the specified signed url and signed request headers +func (obsClient ObsClient) AbortMultipartUploadWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("AbortMultipartUpload", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// InitiateMultipartUploadWithSignedUrl initializes a multipart upload with the specified signed url and signed request headers +func (obsClient ObsClient) InitiateMultipartUploadWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *InitiateMultipartUploadOutput, err error) { + output = &InitiateMultipartUploadOutput{} + err = obsClient.doHTTPWithSignedURL("InitiateMultipartUpload", HTTP_POST, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } else { + ParseInitiateMultipartUploadOutput(output) + } + return +} + +// UploadPartWithSignedUrl uploads a part to a specified bucket by using a specified multipart upload ID +// with the specified signed url and signed request headers and data +func (obsClient ObsClient) UploadPartWithSignedUrl(signedUrl string, actualSignedRequestHeaders 
http.Header, data io.Reader) (output *UploadPartOutput, err error) {
+	output = &UploadPartOutput{}
+	err = obsClient.doHTTPWithSignedURL("UploadPart", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+	if err != nil {
+		output = nil
+	} else {
+		ParseUploadPartOutput(output)
+	}
+	return
+}
+
+// CompleteMultipartUploadWithSignedUrl combines the uploaded parts in a specified bucket by using the multipart upload ID
+// with the specified signed url and signed request headers and data
+func (obsClient ObsClient) CompleteMultipartUploadWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *CompleteMultipartUploadOutput, err error) {
+	output = &CompleteMultipartUploadOutput{}
+	err = obsClient.doHTTPWithSignedURL("CompleteMultipartUpload", HTTP_POST, signedUrl, actualSignedRequestHeaders, data, output, true)
+	if err != nil {
+		output = nil
+	} else {
+		ParseCompleteMultipartUploadOutput(output)
+	}
+	return
+}
+
+// ListPartsWithSignedUrl lists the uploaded parts in a bucket by using the multipart upload ID with the specified signed url and signed request headers
+func (obsClient ObsClient) ListPartsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListPartsOutput, err error) {
+	output = &ListPartsOutput{}
+	err = obsClient.doHTTPWithSignedURL("ListParts", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// CopyPartWithSignedUrl copies a part to a specified bucket by using a specified multipart upload ID with the specified signed url and signed request headers
+func (obsClient ObsClient) CopyPartWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *CopyPartOutput, err error) {
+	output = &CopyPartOutput{}
+	err = obsClient.doHTTPWithSignedURL("CopyPart", HTTP_PUT, signedUrl, actualSignedRequestHeaders, nil, output, true)
+	if err != nil {
+		output = nil
+	} else {
+		ParseCopyPartOutput(output)
+	}
+	return
+}
+
+// SetBucketRequestPaymentWithSignedUrl sets requester-pays setting for a bucket with the specified signed url and signed request headers and data
+func (obsClient ObsClient) SetBucketRequestPaymentWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
+	output = &BaseModel{}
+	err = obsClient.doHTTPWithSignedURL("SetBucketRequestPayment", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// GetBucketRequestPaymentWithSignedUrl gets requester-pays setting of a bucket with the specified signed url and signed request headers
+func (obsClient ObsClient) GetBucketRequestPaymentWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketRequestPaymentOutput, err error) {
+	output = &GetBucketRequestPaymentOutput{}
+	err = obsClient.doHTTPWithSignedURL("GetBucketRequestPayment", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+	if err != nil {
+		output = nil
+	}
+	return
+}
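+
+// The signed-url helpers above compose into a complete multipart upload that
+// needs no credentials on the executing side. A rough sketch of the flow
+// (illustrative only, not part of the original change; error handling elided,
+// bucket/key are placeholders, and the method and sub-resource constants are
+// the SDK's):
+//
+//	// 1. Initiate: POST ?uploads, signed ahead of time.
+//	init, _ := client.CreateSignedUrl(&CreateSignedUrlInput{
+//		Method: HttpMethodPost, Bucket: "my-bucket", Key: "obj",
+//		SubResource: SubResourceUploads, Expires: 300,
+//	})
+//	initOut, _ := client.InitiateMultipartUploadWithSignedUrl(init.SignedUrl, init.ActualSignedRequestHeaders)
+//
+//	// 2. For each part, sign a PUT whose QueryParams carry partNumber and
+//	// the uploadId from initOut, call UploadPartWithSignedUrl, and record
+//	// the returned ETag.
+//
+//	// 3. Complete: sign a POST with uploadId and send the collected Parts
+//	// as the CompleteMultipartUpload XML body via
+//	// CompleteMultipartUploadWithSignedUrl.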
+
+// CreateUploadPartSignedUrl builds a signed http.Request for uploading one
+// part of a multipart upload; the caller attaches the part body and executes
+// the request itself.
+func (obsClient ObsClient) CreateUploadPartSignedUrl(bucketName, objectKey, uploadId string, partNumber int, partSize int64) (*http.Request, error) {
+	var req *http.Request
+
+	input := &UploadPartInput{}
+	input.Bucket = bucketName
+	input.Key = objectKey
+	input.PartNumber = partNumber
+	input.UploadId = uploadId
+	//input.ContentMD5 = _input.ContentMD5
+	//input.SourceFile = _input.SourceFile
+	//input.Offset = _input.Offset
+	input.PartSize = partSize
+	//input.SseHeader = _input.SseHeader
+	//input.Body = _input.Body
+
+	params, headers, _, err := input.trans(obsClient.conf.signature == SignatureObs)
+	if err != nil {
+		return req, err
+	}
+
+	if params == nil {
+		params = make(map[string]string)
+	}
+
+	if headers == nil {
+		headers = make(map[string][]string)
+	}
+
+	// no extension options are passed in here; this loop only mirrors the generic request path
+	var extensions []extensionOptions
+	for _, extension := range extensions {
+		if extensionHeader, ok := extension.(extensionHeaders); ok {
+			_err := extensionHeader(headers, obsClient.conf.signature == SignatureObs)
+			if _err != nil {
+				doLog(LEVEL_WARN, fmt.Sprintf("set header with error: %v", _err))
+			}
+		} else {
+			doLog(LEVEL_WARN, "Unsupported extensionOptions")
+		}
+	}
+
+	// Content-Length must be the byte size of the part, not the part number
+	headers["Content-Length"] = []string{com.ToStr(partSize, 10)}
+
+	requestURL, err := obsClient.doAuth(HTTP_PUT, bucketName, objectKey, params, headers, "")
+	if err != nil {
+		return req, err
+	}
+
+	var _data io.Reader
+	req, err = http.NewRequest(HTTP_PUT, requestURL, _data)
+	if err != nil {
+		return req, err
+	}
+	if obsClient.conf.ctx != nil {
+		req = req.WithContext(obsClient.conf.ctx)
+	}
+
+	if isDebugLogEnabled() {
+		auth := headers[HEADER_AUTH_CAMEL]
+		delete(headers, HEADER_AUTH_CAMEL)
+
+		var isSecurityToken bool
+		var securityToken []string
+		if securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_AMZ]; isSecurityToken {
+			headers[HEADER_STS_TOKEN_AMZ] = []string{"******"}
+		} else if securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_OBS]; isSecurityToken {
+			headers[HEADER_STS_TOKEN_OBS] = []string{"******"}
+		}
+		doLog(LEVEL_DEBUG, "Request headers: %v", headers)
+		headers[HEADER_AUTH_CAMEL] = auth
+		if isSecurityToken {
+			if obsClient.conf.signature == SignatureObs {
+				headers[HEADER_STS_TOKEN_OBS] = securityToken
+			} else {
+				headers[HEADER_STS_TOKEN_AMZ] = securityToken
+			}
+		}
+	}
+
+	for key, value := range headers {
+		if key == HEADER_HOST_CAMEL {
+			req.Host = value[0]
+			delete(headers, key)
+		} else if key == HEADER_CONTENT_LENGTH_CAMEL {
+			req.ContentLength = StringToInt64(value[0], -1)
+			delete(headers, key)
+		} else {
+			req.Header[key] = value
+		}
+	}
+
+	req.Header[HEADER_USER_AGENT_CAMEL] = []string{USER_AGENT}
+
+	return req, nil
+}
diff --git a/modules/obs/trait.go b/modules/obs/trait.go
new file mode 100755
index 000000000..9a59d6a71
--- /dev/null
+++ b/modules/obs/trait.go
@@ -0,0 +1,909 @@
+// Copyright 2019 Huawei Technologies Co.,Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+ +//nolint:structcheck, unused +//nolint:golint, unused +package obs + +import ( + "bytes" + "fmt" + "io" + "os" + "strings" +) + +// IReadCloser defines interface with function: setReadCloser +type IReadCloser interface { + setReadCloser(body io.ReadCloser) +} + +func (output *GetObjectOutput) setReadCloser(body io.ReadCloser) { + output.Body = body +} + +func setHeaders(headers map[string][]string, header string, headerValue []string, isObs bool) { + if isObs { + header = HEADER_PREFIX_OBS + header + headers[header] = headerValue + } else { + header = HEADER_PREFIX + header + headers[header] = headerValue + } +} + +func setHeadersNext(headers map[string][]string, header string, headerNext string, headerValue []string, isObs bool) { + if isObs { + headers[header] = headerValue + } else { + headers[headerNext] = headerValue + } +} + +// IBaseModel defines interface for base response model +type IBaseModel interface { + setStatusCode(statusCode int) + + setRequestID(requestID string) + + setResponseHeaders(responseHeaders map[string][]string) +} + +// ISerializable defines interface with function: trans +type ISerializable interface { + trans(isObs bool) (map[string]string, map[string][]string, interface{}, error) +} + +// DefaultSerializable defines default serializable struct +type DefaultSerializable struct { + params map[string]string + headers map[string][]string + data interface{} +} + +func (input DefaultSerializable) trans(isObs bool) (map[string]string, map[string][]string, interface{}, error) { + return input.params, input.headers, input.data, nil +} + +var defaultSerializable = &DefaultSerializable{} + +func newSubResourceSerial(subResource SubResourceType) *DefaultSerializable { + return &DefaultSerializable{map[string]string{string(subResource): ""}, nil, nil} +} + +func trans(subResource SubResourceType, input interface{}) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(subResource): ""} + data, err = ConvertRequestToIoReader(input) + return +} + +func (baseModel *BaseModel) setStatusCode(statusCode int) { + baseModel.StatusCode = statusCode +} + +func (baseModel *BaseModel) setRequestID(requestID string) { + baseModel.RequestId = requestID +} + +func (baseModel *BaseModel) setResponseHeaders(responseHeaders map[string][]string) { + baseModel.ResponseHeaders = responseHeaders +} + +func (input ListBucketsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + headers = make(map[string][]string) + if input.QueryLocation && !isObs { + setHeaders(headers, HEADER_LOCATION_AMZ, []string{"true"}, isObs) + } + return +} + +func (input CreateBucketInput) prepareGrantHeaders(headers map[string][]string, isObs bool) { + if grantReadID := input.GrantReadId; grantReadID != "" { + setHeaders(headers, HEADER_GRANT_READ_OBS, []string{grantReadID}, isObs) + } + if grantWriteID := input.GrantWriteId; grantWriteID != "" { + setHeaders(headers, HEADER_GRANT_WRITE_OBS, []string{grantWriteID}, isObs) + } + if grantReadAcpID := input.GrantReadAcpId; grantReadAcpID != "" { + setHeaders(headers, HEADER_GRANT_READ_ACP_OBS, []string{grantReadAcpID}, isObs) + } + if grantWriteAcpID := input.GrantWriteAcpId; grantWriteAcpID != "" { + setHeaders(headers, HEADER_GRANT_WRITE_ACP_OBS, []string{grantWriteAcpID}, isObs) + } + if grantFullControlID := input.GrantFullControlId; grantFullControlID != "" { + setHeaders(headers, HEADER_GRANT_FULL_CONTROL_OBS, 
[]string{grantFullControlID}, isObs)
+	}
+	if grantReadDeliveredID := input.GrantReadDeliveredId; grantReadDeliveredID != "" {
+		setHeaders(headers, HEADER_GRANT_READ_DELIVERED_OBS, []string{grantReadDeliveredID}, true)
+	}
+	if grantFullControlDeliveredID := input.GrantFullControlDeliveredId; grantFullControlDeliveredID != "" {
+		setHeaders(headers, HEADER_GRANT_FULL_CONTROL_DELIVERED_OBS, []string{grantFullControlDeliveredID}, true)
+	}
+}
+
+func (input CreateBucketInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+	headers = make(map[string][]string)
+	if acl := string(input.ACL); acl != "" {
+		setHeaders(headers, HEADER_ACL, []string{acl}, isObs)
+	}
+	if storageClass := string(input.StorageClass); storageClass != "" {
+		if !isObs {
+			if storageClass == string(StorageClassWarm) {
+				storageClass = string(storageClassStandardIA)
+			} else if storageClass == string(StorageClassCold) {
+				storageClass = string(storageClassGlacier)
+			}
+		}
+		setHeadersNext(headers, HEADER_STORAGE_CLASS_OBS, HEADER_STORAGE_CLASS, []string{storageClass}, isObs)
+	}
+	if epid := input.Epid; epid != "" {
+		setHeaders(headers, HEADER_EPID_HEADERS, []string{epid}, isObs)
+	}
+	if availableZone := input.AvailableZone; availableZone != "" {
+		setHeaders(headers, HEADER_AZ_REDUNDANCY, []string{availableZone}, isObs)
+	}
+
+	input.prepareGrantHeaders(headers, isObs)
+	if location := strings.TrimSpace(input.Location); location != "" {
+		input.Location = location
+
+		xml := make([]string, 0, 3)
+		xml = append(xml, "<CreateBucketConfiguration>")
+		if isObs {
+			xml = append(xml, fmt.Sprintf("<Location>%s</Location>", input.Location))
+		} else {
+			xml = append(xml, fmt.Sprintf("<LocationConstraint>%s</LocationConstraint>", input.Location))
+		}
+		xml = append(xml, "</CreateBucketConfiguration>")
+
+		data = strings.Join(xml, "")
+	}
+	return
+}
+
+func (input SetBucketStoragePolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+	xml := make([]string, 0, 1)
+	if !isObs {
+		storageClass := "STANDARD"
+		if input.StorageClass == StorageClassWarm {
+			storageClass = string(storageClassStandardIA)
+		} else if input.StorageClass == StorageClassCold {
+			storageClass = string(storageClassGlacier)
+		}
+		params = map[string]string{string(SubResourceStoragePolicy): ""}
+		xml = append(xml, fmt.Sprintf("<StoragePolicy><DefaultStorageClass>%s</DefaultStorageClass></StoragePolicy>", storageClass))
+	} else {
+		if input.StorageClass != StorageClassWarm && input.StorageClass != StorageClassCold {
+			input.StorageClass = StorageClassStandard
+		}
+		params = map[string]string{string(SubResourceStorageClass): ""}
+		xml = append(xml, fmt.Sprintf("<StorageClass>%s</StorageClass>", input.StorageClass))
+	}
+	data = strings.Join(xml, "")
+	return
+}
+
+func (input ListObjsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+	params = make(map[string]string)
+	if input.Prefix != "" {
+		params["prefix"] = input.Prefix
+	}
+	if input.Delimiter != "" {
+		params["delimiter"] = input.Delimiter
+	}
+	if input.MaxKeys > 0 {
+		params["max-keys"] = IntToString(input.MaxKeys)
+	}
+	headers = make(map[string][]string)
+	if origin := strings.TrimSpace(input.Origin); origin != "" {
+		headers[HEADER_ORIGIN_CAMEL] = []string{origin}
+	}
+	if requestHeader := strings.TrimSpace(input.RequestHeader); requestHeader != "" {
+		headers[HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL] = []string{requestHeader}
+	}
+	return
+}
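+
+// For reference, the request bodies assembled above are plain XML strings;
+// assuming the SDK's standard constant values ("WARM", "STANDARD_IA"), a
+// SetBucketStoragePolicyInput with StorageClass=StorageClassWarm serializes to
+//
+//	<StoragePolicy><DefaultStorageClass>STANDARD_IA</DefaultStorageClass></StoragePolicy>
+//
+// against an S3-style endpoint (isObs=false), and to
+//
+//	<StorageClass>WARM</StorageClass>
+//
+// against a native OBS endpoint (isObs=true).
+
+func (input ListObjectsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+	params, headers, data, err =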
input.ListObjsInput.trans(isObs) + if err != nil { + return + } + if input.Marker != "" { + params["marker"] = input.Marker + } + return +} + +func (input ListVersionsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params, headers, data, err = input.ListObjsInput.trans(isObs) + if err != nil { + return + } + params[string(SubResourceVersions)] = "" + if input.KeyMarker != "" { + params["key-marker"] = input.KeyMarker + } + if input.VersionIdMarker != "" { + params["version-id-marker"] = input.VersionIdMarker + } + return +} + +func (input ListMultipartUploadsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceUploads): ""} + if input.Prefix != "" { + params["prefix"] = input.Prefix + } + if input.Delimiter != "" { + params["delimiter"] = input.Delimiter + } + if input.MaxUploads > 0 { + params["max-uploads"] = IntToString(input.MaxUploads) + } + if input.KeyMarker != "" { + params["key-marker"] = input.KeyMarker + } + if input.UploadIdMarker != "" { + params["upload-id-marker"] = input.UploadIdMarker + } + return +} + +func (input SetBucketQuotaInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + return trans(SubResourceQuota, input) +} + +func (input SetBucketAclInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceAcl): ""} + headers = make(map[string][]string) + + if acl := string(input.ACL); acl != "" { + setHeaders(headers, HEADER_ACL, []string{acl}, isObs) + } else { + data, _ = convertBucketACLToXML(input.AccessControlPolicy, false, isObs) + } + return +} + +func (input SetBucketPolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourcePolicy): ""} + data = strings.NewReader(input.Policy) + return +} + +func (input SetBucketCorsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceCors): ""} + data, md5, err := ConvertRequestToIoReaderV2(input) + if err != nil { + return + } + headers = map[string][]string{HEADER_MD5_CAMEL: {md5}} + return +} + +func (input SetBucketVersioningInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + return trans(SubResourceVersioning, input) +} + +func (input SetBucketWebsiteConfigurationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceWebsite): ""} + data, _ = ConvertWebsiteConfigurationToXml(input.BucketWebsiteConfiguration, false) + return +} + +func (input GetBucketMetadataInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + headers = make(map[string][]string) + if origin := strings.TrimSpace(input.Origin); origin != "" { + headers[HEADER_ORIGIN_CAMEL] = []string{origin} + } + if requestHeader := strings.TrimSpace(input.RequestHeader); requestHeader != "" { + headers[HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL] = []string{requestHeader} + } + return +} + +func (input SetBucketLoggingConfigurationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data 
interface{}, err error) { + params = map[string]string{string(SubResourceLogging): ""} + data, _ = ConvertLoggingStatusToXml(input.BucketLoggingStatus, false, isObs) + return +} + +func (input SetBucketLifecycleConfigurationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceLifecycle): ""} + data, md5 := ConvertLifecyleConfigurationToXml(input.BucketLifecyleConfiguration, true, isObs) + headers = map[string][]string{HEADER_MD5_CAMEL: {md5}} + return +} + +func (input SetBucketTaggingInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceTagging): ""} + data, md5, err := ConvertRequestToIoReaderV2(input) + if err != nil { + return + } + headers = map[string][]string{HEADER_MD5_CAMEL: {md5}} + return +} + +func (input SetBucketNotificationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceNotification): ""} + data, _ = ConvertNotificationToXml(input.BucketNotification, false, isObs) + return +} + +func (input DeleteObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = make(map[string]string) + if input.VersionId != "" { + params[PARAM_VERSION_ID] = input.VersionId + } + return +} + +func (input DeleteObjectsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceDelete): ""} + data, md5, err := ConvertRequestToIoReaderV2(input) + if err != nil { + return + } + headers = map[string][]string{HEADER_MD5_CAMEL: {md5}} + return +} + +func (input SetObjectAclInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceAcl): ""} + if input.VersionId != "" { + params[PARAM_VERSION_ID] = input.VersionId + } + headers = make(map[string][]string) + if acl := string(input.ACL); acl != "" { + setHeaders(headers, HEADER_ACL, []string{acl}, isObs) + } else { + data, _ = ConvertAclToXml(input.AccessControlPolicy, false, isObs) + } + return +} + +func (input GetObjectAclInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceAcl): ""} + if input.VersionId != "" { + params[PARAM_VERSION_ID] = input.VersionId + } + return +} + +func (input RestoreObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceRestore): ""} + if input.VersionId != "" { + params[PARAM_VERSION_ID] = input.VersionId + } + if !isObs { + data, err = ConvertRequestToIoReader(input) + } else { + data = ConverntObsRestoreToXml(input) + } + return +} + +// GetEncryption gets the Encryption field value from SseKmsHeader +func (header SseKmsHeader) GetEncryption() string { + if header.Encryption != "" { + return header.Encryption + } + if !header.isObs { + return DEFAULT_SSE_KMS_ENCRYPTION + } + return DEFAULT_SSE_KMS_ENCRYPTION_OBS +} + +// GetKey gets the Key field value from SseKmsHeader +func (header SseKmsHeader) GetKey() string { + return header.Key +} + +// GetEncryption gets the Encryption field value from SseCHeader +func (header SseCHeader) 
GetEncryption() string { + if header.Encryption != "" { + return header.Encryption + } + return DEFAULT_SSE_C_ENCRYPTION +} + +// GetKey gets the Key field value from SseCHeader +func (header SseCHeader) GetKey() string { + return header.Key +} + +// GetKeyMD5 gets the KeyMD5 field value from SseCHeader +func (header SseCHeader) GetKeyMD5() string { + if header.KeyMD5 != "" { + return header.KeyMD5 + } + + if ret, err := Base64Decode(header.GetKey()); err == nil { + return Base64Md5(ret) + } + return "" +} + +func setSseHeader(headers map[string][]string, sseHeader ISseHeader, sseCOnly bool, isObs bool) { + if sseHeader != nil { + if sseCHeader, ok := sseHeader.(SseCHeader); ok { + setHeaders(headers, HEADER_SSEC_ENCRYPTION, []string{sseCHeader.GetEncryption()}, isObs) + setHeaders(headers, HEADER_SSEC_KEY, []string{sseCHeader.GetKey()}, isObs) + setHeaders(headers, HEADER_SSEC_KEY_MD5, []string{sseCHeader.GetKeyMD5()}, isObs) + } else if sseKmsHeader, ok := sseHeader.(SseKmsHeader); !sseCOnly && ok { + sseKmsHeader.isObs = isObs + setHeaders(headers, HEADER_SSEKMS_ENCRYPTION, []string{sseKmsHeader.GetEncryption()}, isObs) + if sseKmsHeader.GetKey() != "" { + setHeadersNext(headers, HEADER_SSEKMS_KEY_OBS, HEADER_SSEKMS_KEY_AMZ, []string{sseKmsHeader.GetKey()}, isObs) + } + } + } +} + +func (input GetObjectMetadataInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = make(map[string]string) + if input.VersionId != "" { + params[PARAM_VERSION_ID] = input.VersionId + } + headers = make(map[string][]string) + + if input.Origin != "" { + headers[HEADER_ORIGIN_CAMEL] = []string{input.Origin} + } + + if input.RequestHeader != "" { + headers[HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL] = []string{input.RequestHeader} + } + setSseHeader(headers, input.SseHeader, true, isObs) + return +} + +func (input SetObjectMetadataInput) prepareContentHeaders(headers map[string][]string) { + if input.ContentDisposition != "" { + headers[HEADER_CONTENT_DISPOSITION_CAMEL] = []string{input.ContentDisposition} + } + if input.ContentEncoding != "" { + headers[HEADER_CONTENT_ENCODING_CAMEL] = []string{input.ContentEncoding} + } + if input.ContentLanguage != "" { + headers[HEADER_CONTENT_LANGUAGE_CAMEL] = []string{input.ContentLanguage} + } + + if input.ContentType != "" { + headers[HEADER_CONTENT_TYPE_CAML] = []string{input.ContentType} + } +} + +func (input SetObjectMetadataInput) prepareStorageClass(headers map[string][]string, isObs bool) { + if storageClass := string(input.StorageClass); storageClass != "" { + if !isObs { + if storageClass == string(StorageClassWarm) { + storageClass = string(storageClassStandardIA) + } else if storageClass == string(StorageClassCold) { + storageClass = string(storageClassGlacier) + } + } + setHeaders(headers, HEADER_STORAGE_CLASS2, []string{storageClass}, isObs) + } +} + +func (input SetObjectMetadataInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = make(map[string]string) + params = map[string]string{string(SubResourceMetadata): ""} + if input.VersionId != "" { + params[PARAM_VERSION_ID] = input.VersionId + } + headers = make(map[string][]string) + + if directive := string(input.MetadataDirective); directive != "" { + setHeaders(headers, HEADER_METADATA_DIRECTIVE, []string{string(input.MetadataDirective)}, isObs) + } else { + setHeaders(headers, HEADER_METADATA_DIRECTIVE, []string{string(ReplaceNew)}, isObs) + } + if input.CacheControl != 
"" { + headers[HEADER_CACHE_CONTROL_CAMEL] = []string{input.CacheControl} + } + input.prepareContentHeaders(headers) + if input.Expires != "" { + headers[HEADER_EXPIRES_CAMEL] = []string{input.Expires} + } + if input.WebsiteRedirectLocation != "" { + setHeaders(headers, HEADER_WEBSITE_REDIRECT_LOCATION, []string{input.WebsiteRedirectLocation}, isObs) + } + input.prepareStorageClass(headers, isObs) + if input.Metadata != nil { + for key, value := range input.Metadata { + key = strings.TrimSpace(key) + setHeadersNext(headers, HEADER_PREFIX_META_OBS+key, HEADER_PREFIX_META+key, []string{value}, isObs) + } + } + return +} + +func (input GetObjectInput) prepareResponseParams(params map[string]string) { + if input.ResponseCacheControl != "" { + params[PARAM_RESPONSE_CACHE_CONTROL] = input.ResponseCacheControl + } + if input.ResponseContentDisposition != "" { + params[PARAM_RESPONSE_CONTENT_DISPOSITION] = input.ResponseContentDisposition + } + if input.ResponseContentEncoding != "" { + params[PARAM_RESPONSE_CONTENT_ENCODING] = input.ResponseContentEncoding + } + if input.ResponseContentLanguage != "" { + params[PARAM_RESPONSE_CONTENT_LANGUAGE] = input.ResponseContentLanguage + } + if input.ResponseContentType != "" { + params[PARAM_RESPONSE_CONTENT_TYPE] = input.ResponseContentType + } + if input.ResponseExpires != "" { + params[PARAM_RESPONSE_EXPIRES] = input.ResponseExpires + } +} + +func (input GetObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params, headers, data, err = input.GetObjectMetadataInput.trans(isObs) + if err != nil { + return + } + input.prepareResponseParams(params) + if input.ImageProcess != "" { + params[PARAM_IMAGE_PROCESS] = input.ImageProcess + } + if input.RangeStart >= 0 && input.RangeEnd > input.RangeStart { + headers[HEADER_RANGE] = []string{fmt.Sprintf("bytes=%d-%d", input.RangeStart, input.RangeEnd)} + } + + if input.IfMatch != "" { + headers[HEADER_IF_MATCH] = []string{input.IfMatch} + } + if input.IfNoneMatch != "" { + headers[HEADER_IF_NONE_MATCH] = []string{input.IfNoneMatch} + } + if !input.IfModifiedSince.IsZero() { + headers[HEADER_IF_MODIFIED_SINCE] = []string{FormatUtcToRfc1123(input.IfModifiedSince)} + } + if !input.IfUnmodifiedSince.IsZero() { + headers[HEADER_IF_UNMODIFIED_SINCE] = []string{FormatUtcToRfc1123(input.IfUnmodifiedSince)} + } + return +} + +func (input ObjectOperationInput) prepareGrantHeaders(headers map[string][]string) { + if GrantReadID := input.GrantReadId; GrantReadID != "" { + setHeaders(headers, HEADER_GRANT_READ_OBS, []string{GrantReadID}, true) + } + if GrantReadAcpID := input.GrantReadAcpId; GrantReadAcpID != "" { + setHeaders(headers, HEADER_GRANT_READ_ACP_OBS, []string{GrantReadAcpID}, true) + } + if GrantWriteAcpID := input.GrantWriteAcpId; GrantWriteAcpID != "" { + setHeaders(headers, HEADER_GRANT_WRITE_ACP_OBS, []string{GrantWriteAcpID}, true) + } + if GrantFullControlID := input.GrantFullControlId; GrantFullControlID != "" { + setHeaders(headers, HEADER_GRANT_FULL_CONTROL_OBS, []string{GrantFullControlID}, true) + } +} + +func (input ObjectOperationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + headers = make(map[string][]string) + params = make(map[string]string) + if acl := string(input.ACL); acl != "" { + setHeaders(headers, HEADER_ACL, []string{acl}, isObs) + } + input.prepareGrantHeaders(headers) + if storageClass := string(input.StorageClass); storageClass != "" { + if !isObs { + if 
storageClass == string(StorageClassWarm) { + storageClass = string(storageClassStandardIA) + } else if storageClass == string(StorageClassCold) { + storageClass = string(storageClassGlacier) + } + } + setHeaders(headers, HEADER_STORAGE_CLASS2, []string{storageClass}, isObs) + } + if input.WebsiteRedirectLocation != "" { + setHeaders(headers, HEADER_WEBSITE_REDIRECT_LOCATION, []string{input.WebsiteRedirectLocation}, isObs) + + } + setSseHeader(headers, input.SseHeader, false, isObs) + if input.Expires != 0 { + setHeaders(headers, HEADER_EXPIRES, []string{Int64ToString(input.Expires)}, true) + } + if input.Metadata != nil { + for key, value := range input.Metadata { + key = strings.TrimSpace(key) + setHeadersNext(headers, HEADER_PREFIX_META_OBS+key, HEADER_PREFIX_META+key, []string{value}, isObs) + } + } + return +} + +func (input PutObjectBasicInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params, headers, data, err = input.ObjectOperationInput.trans(isObs) + if err != nil { + return + } + + if input.ContentMD5 != "" { + headers[HEADER_MD5_CAMEL] = []string{input.ContentMD5} + } + + if input.ContentLength > 0 { + headers[HEADER_CONTENT_LENGTH_CAMEL] = []string{Int64ToString(input.ContentLength)} + } + if input.ContentType != "" { + headers[HEADER_CONTENT_TYPE_CAML] = []string{input.ContentType} + } + + return +} + +func (input PutObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params, headers, data, err = input.PutObjectBasicInput.trans(isObs) + if err != nil { + return + } + if input.Body != nil { + data = input.Body + } + return +} + +func (input CopyObjectInput) prepareReplaceHeaders(headers map[string][]string) { + if input.CacheControl != "" { + headers[HEADER_CACHE_CONTROL] = []string{input.CacheControl} + } + if input.ContentDisposition != "" { + headers[HEADER_CONTENT_DISPOSITION] = []string{input.ContentDisposition} + } + if input.ContentEncoding != "" { + headers[HEADER_CONTENT_ENCODING] = []string{input.ContentEncoding} + } + if input.ContentLanguage != "" { + headers[HEADER_CONTENT_LANGUAGE] = []string{input.ContentLanguage} + } + if input.ContentType != "" { + headers[HEADER_CONTENT_TYPE] = []string{input.ContentType} + } + if input.Expires != "" { + headers[HEADER_EXPIRES] = []string{input.Expires} + } +} + +func (input CopyObjectInput) prepareCopySourceHeaders(headers map[string][]string, isObs bool) { + if input.CopySourceIfMatch != "" { + setHeaders(headers, HEADER_COPY_SOURCE_IF_MATCH, []string{input.CopySourceIfMatch}, isObs) + } + if input.CopySourceIfNoneMatch != "" { + setHeaders(headers, HEADER_COPY_SOURCE_IF_NONE_MATCH, []string{input.CopySourceIfNoneMatch}, isObs) + } + if !input.CopySourceIfModifiedSince.IsZero() { + setHeaders(headers, HEADER_COPY_SOURCE_IF_MODIFIED_SINCE, []string{FormatUtcToRfc1123(input.CopySourceIfModifiedSince)}, isObs) + } + if !input.CopySourceIfUnmodifiedSince.IsZero() { + setHeaders(headers, HEADER_COPY_SOURCE_IF_UNMODIFIED_SINCE, []string{FormatUtcToRfc1123(input.CopySourceIfUnmodifiedSince)}, isObs) + } +} + +func (input CopyObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params, headers, data, err = input.ObjectOperationInput.trans(isObs) + if err != nil { + return + } + + var copySource string + if input.CopySourceVersionId != "" { + copySource = fmt.Sprintf("%s/%s?versionId=%s", input.CopySourceBucket, 
UrlEncode(input.CopySourceKey, false), input.CopySourceVersionId) + } else { + copySource = fmt.Sprintf("%s/%s", input.CopySourceBucket, UrlEncode(input.CopySourceKey, false)) + } + setHeaders(headers, HEADER_COPY_SOURCE, []string{copySource}, isObs) + + if directive := string(input.MetadataDirective); directive != "" { + setHeaders(headers, HEADER_METADATA_DIRECTIVE, []string{directive}, isObs) + } + + if input.MetadataDirective == ReplaceMetadata { + input.prepareReplaceHeaders(headers) + } + + input.prepareCopySourceHeaders(headers, isObs) + if input.SourceSseHeader != nil { + if sseCHeader, ok := input.SourceSseHeader.(SseCHeader); ok { + setHeaders(headers, HEADER_SSEC_COPY_SOURCE_ENCRYPTION, []string{sseCHeader.GetEncryption()}, isObs) + setHeaders(headers, HEADER_SSEC_COPY_SOURCE_KEY, []string{sseCHeader.GetKey()}, isObs) + setHeaders(headers, HEADER_SSEC_COPY_SOURCE_KEY_MD5, []string{sseCHeader.GetKeyMD5()}, isObs) + } + } + if input.SuccessActionRedirect != "" { + headers[HEADER_SUCCESS_ACTION_REDIRECT] = []string{input.SuccessActionRedirect} + } + return +} + +func (input AbortMultipartUploadInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{"uploadId": input.UploadId} + return +} + +func (input InitiateMultipartUploadInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params, headers, data, err = input.ObjectOperationInput.trans(isObs) + if err != nil { + return + } + if input.ContentType != "" { + headers[HEADER_CONTENT_TYPE_CAML] = []string{input.ContentType} + } + params[string(SubResourceUploads)] = "" + return +} + +func (input UploadPartInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{"uploadId": input.UploadId, "partNumber": IntToString(input.PartNumber)} + headers = make(map[string][]string) + setSseHeader(headers, input.SseHeader, true, isObs) + if input.ContentMD5 != "" { + headers[HEADER_MD5_CAMEL] = []string{input.ContentMD5} + } + if input.Body != nil { + data = input.Body + } + return +} + +func (input CompleteMultipartUploadInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{"uploadId": input.UploadId} + data, _ = ConvertCompleteMultipartUploadInputToXml(input, false) + return +} + +func (input ListPartsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{"uploadId": input.UploadId} + if input.MaxParts > 0 { + params["max-parts"] = IntToString(input.MaxParts) + } + if input.PartNumberMarker > 0 { + params["part-number-marker"] = IntToString(input.PartNumberMarker) + } + return +} + +func (input CopyPartInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{"uploadId": input.UploadId, "partNumber": IntToString(input.PartNumber)} + headers = make(map[string][]string, 1) + var copySource string + if input.CopySourceVersionId != "" { + copySource = fmt.Sprintf("%s/%s?versionId=%s", input.CopySourceBucket, UrlEncode(input.CopySourceKey, false), input.CopySourceVersionId) + } else { + copySource = fmt.Sprintf("%s/%s", input.CopySourceBucket, UrlEncode(input.CopySourceKey, false)) + } + setHeaders(headers, HEADER_COPY_SOURCE, []string{copySource}, isObs) + if 
input.CopySourceRangeStart >= 0 && input.CopySourceRangeEnd > input.CopySourceRangeStart { + setHeaders(headers, HEADER_COPY_SOURCE_RANGE, []string{fmt.Sprintf("bytes=%d-%d", input.CopySourceRangeStart, input.CopySourceRangeEnd)}, isObs) + } + + setSseHeader(headers, input.SseHeader, true, isObs) + if input.SourceSseHeader != nil { + if sseCHeader, ok := input.SourceSseHeader.(SseCHeader); ok { + setHeaders(headers, HEADER_SSEC_COPY_SOURCE_ENCRYPTION, []string{sseCHeader.GetEncryption()}, isObs) + setHeaders(headers, HEADER_SSEC_COPY_SOURCE_KEY, []string{sseCHeader.GetKey()}, isObs) + setHeaders(headers, HEADER_SSEC_COPY_SOURCE_KEY_MD5, []string{sseCHeader.GetKeyMD5()}, isObs) + } + + } + return +} + +func (input HeadObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = make(map[string]string) + if input.VersionId != "" { + params[PARAM_VERSION_ID] = input.VersionId + } + return +} + +func (input SetBucketRequestPaymentInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + return trans(SubResourceRequestPayment, input) +} + +type partSlice []Part + +func (parts partSlice) Len() int { + return len(parts) +} + +func (parts partSlice) Less(i, j int) bool { + return parts[i].PartNumber < parts[j].PartNumber +} + +func (parts partSlice) Swap(i, j int) { + parts[i], parts[j] = parts[j], parts[i] +} + +type readerWrapper struct { + reader io.Reader + mark int64 + totalCount int64 + readedCount int64 +} + +func (rw *readerWrapper) seek(offset int64, whence int) (int64, error) { + if r, ok := rw.reader.(*strings.Reader); ok { + return r.Seek(offset, whence) + } else if r, ok := rw.reader.(*bytes.Reader); ok { + return r.Seek(offset, whence) + } else if r, ok := rw.reader.(*os.File); ok { + return r.Seek(offset, whence) + } + return offset, nil +} + +func (rw *readerWrapper) Read(p []byte) (n int, err error) { + if rw.totalCount == 0 { + return 0, io.EOF + } + if rw.totalCount > 0 { + n, err = rw.reader.Read(p) + readedOnce := int64(n) + remainCount := rw.totalCount - rw.readedCount + if remainCount > readedOnce { + rw.readedCount += readedOnce + return n, err + } + rw.readedCount += remainCount + return int(remainCount), io.EOF + } + return rw.reader.Read(p) +} + +type fileReaderWrapper struct { + readerWrapper + filePath string +} + +func (input SetBucketFetchPolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + contentType, _ := mimeTypes["json"] + headers = make(map[string][]string, 2) + headers[HEADER_CONTENT_TYPE] = []string{contentType} + setHeaders(headers, headerOefMarker, []string{"yes"}, isObs) + data, err = convertFetchPolicyToJSON(input) + return +} + +func (input GetBucketFetchPolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + headers = make(map[string][]string, 1) + setHeaders(headers, headerOefMarker, []string{"yes"}, isObs) + return +} + +func (input DeleteBucketFetchPolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + headers = make(map[string][]string, 1) + setHeaders(headers, headerOefMarker, []string{"yes"}, isObs) + return +} + +func (input SetBucketFetchJobInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + contentType, _ := mimeTypes["json"] + headers = make(map[string][]string, 2) + 
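// the OEF (fetch) extension APIs take JSON request bodies rather than the XML + // used by the other bucket APIs, so mark the request with a JSON content type + // and the OEF marker header +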
headers[HEADER_CONTENT_TYPE] = []string{contentType} + setHeaders(headers, headerOefMarker, []string{"yes"}, isObs) + data, err = convertFetchJobToJSON(input) + return +} + +func (input GetBucketFetchJobInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + headers = make(map[string][]string, 1) + setHeaders(headers, headerOefMarker, []string{"yes"}, isObs) + return +} diff --git a/modules/obs/transfer.go b/modules/obs/transfer.go new file mode 100755 index 000000000..4dc50c0f9 --- /dev/null +++ b/modules/obs/transfer.go @@ -0,0 +1,873 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +//nolint:golint, unused +package obs + +import ( + "bufio" + "encoding/xml" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sync" + "sync/atomic" + "syscall" +) + +var errAbort = errors.New("AbortError") + +// FileStatus defines the upload file properties +type FileStatus struct { + XMLName xml.Name `xml:"FileInfo"` + LastModified int64 `xml:"LastModified"` + Size int64 `xml:"Size"` +} + +// UploadPartInfo defines the upload part properties +type UploadPartInfo struct { + XMLName xml.Name `xml:"UploadPart"` + PartNumber int `xml:"PartNumber"` + Etag string `xml:"Etag"` + PartSize int64 `xml:"PartSize"` + Offset int64 `xml:"Offset"` + IsCompleted bool `xml:"IsCompleted"` +} + +// UploadCheckpoint defines the upload checkpoint file properties +type UploadCheckpoint struct { + XMLName xml.Name `xml:"UploadFileCheckpoint"` + Bucket string `xml:"Bucket"` + Key string `xml:"Key"` + UploadId string `xml:"UploadId,omitempty"` + UploadFile string `xml:"FileUrl"` + FileInfo FileStatus `xml:"FileInfo"` + UploadParts []UploadPartInfo `xml:"UploadParts>UploadPart"` +} + +func (ufc *UploadCheckpoint) isValid(bucket, key, uploadFile string, fileStat os.FileInfo) bool { + if ufc.Bucket != bucket || ufc.Key != key || ufc.UploadFile != uploadFile { + doLog(LEVEL_INFO, "Checkpoint file is invalid, the bucketName or objectKey or uploadFile was changed. clear the record.") + return false + } + + if ufc.FileInfo.Size != fileStat.Size() || ufc.FileInfo.LastModified != fileStat.ModTime().Unix() { + doLog(LEVEL_INFO, "Checkpoint file is invalid, the uploadFile was changed. clear the record.") + return false + } + + if ufc.UploadId == "" { + doLog(LEVEL_INFO, "UploadId is invalid. 
clear the record.") + return false + } + + return true +} + +type uploadPartTask struct { + UploadPartInput + obsClient *ObsClient + abort *int32 + extensions []extensionOptions + enableCheckpoint bool +} + +func (task *uploadPartTask) Run() interface{} { + if atomic.LoadInt32(task.abort) == 1 { + return errAbort + } + + input := &UploadPartInput{} + input.Bucket = task.Bucket + input.Key = task.Key + input.PartNumber = task.PartNumber + input.UploadId = task.UploadId + input.SseHeader = task.SseHeader + input.SourceFile = task.SourceFile + input.Offset = task.Offset + input.PartSize = task.PartSize + extensions := task.extensions + + var output *UploadPartOutput + var err error + if extensions != nil { + output, err = task.obsClient.UploadPart(input, extensions...) + } else { + output, err = task.obsClient.UploadPart(input) + } + + if err == nil { + if output.ETag == "" { + doLog(LEVEL_WARN, "Get invalid etag value after uploading part [%d].", task.PartNumber) + if !task.enableCheckpoint { + atomic.CompareAndSwapInt32(task.abort, 0, 1) + doLog(LEVEL_WARN, "Task is aborted, part number is [%d]", task.PartNumber) + } + return fmt.Errorf("get invalid etag value after uploading part [%d]", task.PartNumber) + } + return output + } else if obsError, ok := err.(ObsError); ok && obsError.StatusCode >= 400 && obsError.StatusCode < 500 { + atomic.CompareAndSwapInt32(task.abort, 0, 1) + doLog(LEVEL_WARN, "Task is aborted, part number is [%d]", task.PartNumber) + } + return err +} + +func loadCheckpointFile(checkpointFile string, result interface{}) error { + ret, err := ioutil.ReadFile(checkpointFile) + if err != nil { + return err + } + if len(ret) == 0 { + return nil + } + return xml.Unmarshal(ret, result) +} + +func updateCheckpointFile(fc interface{}, checkpointFilePath string) error { + result, err := xml.Marshal(fc) + if err != nil { + return err + } + err = ioutil.WriteFile(checkpointFilePath, result, 0666) + return err +} + +func getCheckpointFile(ufc *UploadCheckpoint, uploadFileStat os.FileInfo, input *UploadFileInput, obsClient *ObsClient, extensions []extensionOptions) (needCheckpoint bool, err error) { + checkpointFilePath := input.CheckpointFile + checkpointFileStat, err := os.Stat(checkpointFilePath) + if err != nil { + doLog(LEVEL_DEBUG, fmt.Sprintf("Stat checkpoint file failed with error: [%v].", err)) + return true, nil + } + if checkpointFileStat.IsDir() { + doLog(LEVEL_ERROR, "Checkpoint file can not be a folder.") + return false, errors.New("checkpoint file can not be a folder") + } + err = loadCheckpointFile(checkpointFilePath, ufc) + if err != nil { + doLog(LEVEL_WARN, fmt.Sprintf("Load checkpoint file failed with error: [%v].", err)) + return true, nil + } else if !ufc.isValid(input.Bucket, input.Key, input.UploadFile, uploadFileStat) { + if ufc.Bucket != "" && ufc.Key != "" && ufc.UploadId != "" { + _err := abortTask(ufc.Bucket, ufc.Key, ufc.UploadId, obsClient, extensions) + if _err != nil { + doLog(LEVEL_WARN, "Failed to abort upload task [%s].", ufc.UploadId) + } + } + _err := os.Remove(checkpointFilePath) + if _err != nil { + doLog(LEVEL_WARN, fmt.Sprintf("Failed to remove checkpoint file with error: [%v].", _err)) + } + } else { + return false, nil + } + + return true, nil +} + +func prepareUpload(ufc *UploadCheckpoint, uploadFileStat os.FileInfo, input *UploadFileInput, obsClient *ObsClient, extensions []extensionOptions) error { + initiateInput := &InitiateMultipartUploadInput{} + initiateInput.ObjectOperationInput = input.ObjectOperationInput + 
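// the new multipart upload inherits the caller's ACL, storage class, SSE + // and metadata settings carried in ObjectOperationInput +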
initiateInput.ContentType = input.ContentType + var output *InitiateMultipartUploadOutput + var err error + if extensions != nil { + output, err = obsClient.InitiateMultipartUpload(initiateInput, extensions...) + } else { + output, err = obsClient.InitiateMultipartUpload(initiateInput) + } + if err != nil { + return err + } + + ufc.Bucket = input.Bucket + ufc.Key = input.Key + ufc.UploadFile = input.UploadFile + ufc.FileInfo = FileStatus{} + ufc.FileInfo.Size = uploadFileStat.Size() + ufc.FileInfo.LastModified = uploadFileStat.ModTime().Unix() + ufc.UploadId = output.UploadId + + err = sliceFile(input.PartSize, ufc) + return err +} + +func sliceFile(partSize int64, ufc *UploadCheckpoint) error { + fileSize := ufc.FileInfo.Size + cnt := fileSize / partSize + if cnt >= 10000 { + partSize = fileSize / 10000 + if fileSize%10000 != 0 { + partSize++ + } + cnt = fileSize / partSize + } + if fileSize%partSize != 0 { + cnt++ + } + + if partSize > MAX_PART_SIZE { + doLog(LEVEL_ERROR, "The source upload file is too large") + return fmt.Errorf("The source upload file is too large") + } + + if cnt == 0 { + uploadPart := UploadPartInfo{} + uploadPart.PartNumber = 1 + ufc.UploadParts = []UploadPartInfo{uploadPart} + } else { + uploadParts := make([]UploadPartInfo, 0, cnt) + var i int64 + for i = 0; i < cnt; i++ { + uploadPart := UploadPartInfo{} + uploadPart.PartNumber = int(i) + 1 + uploadPart.PartSize = partSize + uploadPart.Offset = i * partSize + uploadParts = append(uploadParts, uploadPart) + } + if value := fileSize % partSize; value != 0 { + uploadParts[cnt-1].PartSize = value + } + ufc.UploadParts = uploadParts + } + return nil +} + +func abortTask(bucket, key, uploadID string, obsClient *ObsClient, extensions []extensionOptions) error { + input := &AbortMultipartUploadInput{} + input.Bucket = bucket + input.Key = key + input.UploadId = uploadID + if extensions != nil { + _, err := obsClient.AbortMultipartUpload(input, extensions...) + return err + } + _, err := obsClient.AbortMultipartUpload(input) + return err +} + +func handleUploadFileResult(uploadPartError error, ufc *UploadCheckpoint, enableCheckpoint bool, obsClient *ObsClient, extensions []extensionOptions) error { + if uploadPartError != nil { + if enableCheckpoint { + return uploadPartError + } + _err := abortTask(ufc.Bucket, ufc.Key, ufc.UploadId, obsClient, extensions) + if _err != nil { + doLog(LEVEL_WARN, "Failed to abort task [%s].", ufc.UploadId) + } + return uploadPartError + } + return nil +} + +func completeParts(ufc *UploadCheckpoint, enableCheckpoint bool, checkpointFilePath string, obsClient *ObsClient, extensions []extensionOptions) (output *CompleteMultipartUploadOutput, err error) { + completeInput := &CompleteMultipartUploadInput{} + completeInput.Bucket = ufc.Bucket + completeInput.Key = ufc.Key + completeInput.UploadId = ufc.UploadId + parts := make([]Part, 0, len(ufc.UploadParts)) + for _, uploadPart := range ufc.UploadParts { + part := Part{} + part.PartNumber = uploadPart.PartNumber + part.ETag = uploadPart.Etag + parts = append(parts, part) + } + completeInput.Parts = parts + var completeOutput *CompleteMultipartUploadOutput + if extensions != nil { + completeOutput, err = obsClient.CompleteMultipartUpload(completeInput, extensions...) 
+ } else { + completeOutput, err = obsClient.CompleteMultipartUpload(completeInput) + } + + if err == nil { + if enableCheckpoint { + _err := os.Remove(checkpointFilePath) + if _err != nil { + doLog(LEVEL_WARN, "Upload file successfully, but remove checkpoint file failed with error [%v].", _err) + } + } + return completeOutput, err + } + if !enableCheckpoint { + _err := abortTask(ufc.Bucket, ufc.Key, ufc.UploadId, obsClient, extensions) + if _err != nil { + doLog(LEVEL_WARN, "Failed to abort task [%s].", ufc.UploadId) + } + } + return completeOutput, err +} + +func (obsClient ObsClient) resumeUpload(input *UploadFileInput, extensions []extensionOptions) (output *CompleteMultipartUploadOutput, err error) { + uploadFileStat, err := os.Stat(input.UploadFile) + if err != nil { + doLog(LEVEL_ERROR, fmt.Sprintf("Failed to stat uploadFile with error: [%v].", err)) + return nil, err + } + if uploadFileStat.IsDir() { + doLog(LEVEL_ERROR, "UploadFile can not be a folder.") + return nil, errors.New("uploadFile can not be a folder") + } + + ufc := &UploadCheckpoint{} + + var needCheckpoint = true + var checkpointFilePath = input.CheckpointFile + var enableCheckpoint = input.EnableCheckpoint + if enableCheckpoint { + needCheckpoint, err = getCheckpointFile(ufc, uploadFileStat, input, &obsClient, extensions) + if err != nil { + return nil, err + } + } + if needCheckpoint { + err = prepareUpload(ufc, uploadFileStat, input, &obsClient, extensions) + if err != nil { + return nil, err + } + + if enableCheckpoint { + err = updateCheckpointFile(ufc, checkpointFilePath) + if err != nil { + doLog(LEVEL_ERROR, "Failed to update checkpoint file with error [%v].", err) + _err := abortTask(ufc.Bucket, ufc.Key, ufc.UploadId, &obsClient, extensions) + if _err != nil { + doLog(LEVEL_WARN, "Failed to abort task [%s].", ufc.UploadId) + } + return nil, err + } + } + } + + uploadPartError := obsClient.uploadPartConcurrent(ufc, checkpointFilePath, input, extensions) + err = handleUploadFileResult(uploadPartError, ufc, enableCheckpoint, &obsClient, extensions) + if err != nil { + return nil, err + } + + completeOutput, err := completeParts(ufc, enableCheckpoint, checkpointFilePath, &obsClient, extensions) + + return completeOutput, err +} + +func handleUploadTaskResult(result interface{}, ufc *UploadCheckpoint, partNum int, enableCheckpoint bool, checkpointFilePath string, lock *sync.Mutex) (err error) { + if uploadPartOutput, ok := result.(*UploadPartOutput); ok { + lock.Lock() + defer lock.Unlock() + ufc.UploadParts[partNum-1].Etag = uploadPartOutput.ETag + ufc.UploadParts[partNum-1].IsCompleted = true + if enableCheckpoint { + _err := updateCheckpointFile(ufc, checkpointFilePath) + if _err != nil { + doLog(LEVEL_WARN, "Failed to update checkpoint file with error [%v].", _err) + } + } + } else if result != errAbort { + if _err, ok := result.(error); ok { + err = _err + } + } + return +} + +func (obsClient ObsClient) uploadPartConcurrent(ufc *UploadCheckpoint, checkpointFilePath string, input *UploadFileInput, extensions []extensionOptions) error { + pool := NewRoutinePool(input.TaskNum, MAX_PART_NUM) + var uploadPartError atomic.Value + var errFlag int32 + var abort int32 + lock := new(sync.Mutex) + for _, uploadPart := range ufc.UploadParts { + if atomic.LoadInt32(&abort) == 1 { + break + } + if uploadPart.IsCompleted { + continue + } + task := uploadPartTask{ + UploadPartInput: UploadPartInput{ + Bucket: ufc.Bucket, + Key: ufc.Key, + PartNumber: uploadPart.PartNumber, + UploadId: ufc.UploadId, + SseHeader: 
input.SseHeader, + SourceFile: input.UploadFile, + Offset: uploadPart.Offset, + PartSize: uploadPart.PartSize, + }, + obsClient: &obsClient, + abort: &abort, + extensions: extensions, + enableCheckpoint: input.EnableCheckpoint, + } + pool.ExecuteFunc(func() interface{} { + result := task.Run() + err := handleUploadTaskResult(result, ufc, task.PartNumber, input.EnableCheckpoint, input.CheckpointFile, lock) + if err != nil && atomic.CompareAndSwapInt32(&errFlag, 0, 1) { + uploadPartError.Store(err) + } + return nil + }) + } + pool.ShutDown() + if err, ok := uploadPartError.Load().(error); ok { + return err + } + return nil +} + +// ObjectInfo defines download object info +type ObjectInfo struct { + XMLName xml.Name `xml:"ObjectInfo"` + LastModified int64 `xml:"LastModified"` + Size int64 `xml:"Size"` + ETag string `xml:"ETag"` +} + +// TempFileInfo defines temp download file properties +type TempFileInfo struct { + XMLName xml.Name `xml:"TempFileInfo"` + TempFileUrl string `xml:"TempFileUrl"` + Size int64 `xml:"Size"` +} + +// DownloadPartInfo defines download part properties +type DownloadPartInfo struct { + XMLName xml.Name `xml:"DownloadPart"` + PartNumber int64 `xml:"PartNumber"` + RangeEnd int64 `xml:"RangeEnd"` + Offset int64 `xml:"Offset"` + IsCompleted bool `xml:"IsCompleted"` +} + +// DownloadCheckpoint defines download checkpoint file properties +type DownloadCheckpoint struct { + XMLName xml.Name `xml:"DownloadFileCheckpoint"` + Bucket string `xml:"Bucket"` + Key string `xml:"Key"` + VersionId string `xml:"VersionId,omitempty"` + DownloadFile string `xml:"FileUrl"` + ObjectInfo ObjectInfo `xml:"ObjectInfo"` + TempFileInfo TempFileInfo `xml:"TempFileInfo"` + DownloadParts []DownloadPartInfo `xml:"DownloadParts>DownloadPart"` +} + +func (dfc *DownloadCheckpoint) isValid(input *DownloadFileInput, output *GetObjectMetadataOutput) bool { + if dfc.Bucket != input.Bucket || dfc.Key != input.Key || dfc.VersionId != input.VersionId || dfc.DownloadFile != input.DownloadFile { + doLog(LEVEL_INFO, "Checkpoint file is invalid, the bucketName or objectKey or downloadFile was changed. clear the record.") + return false + } + if dfc.ObjectInfo.LastModified != output.LastModified.Unix() || dfc.ObjectInfo.ETag != output.ETag || dfc.ObjectInfo.Size != output.ContentLength { + doLog(LEVEL_INFO, "Checkpoint file is invalid, the object info was changed. clear the record.") + return false + } + if dfc.TempFileInfo.Size != output.ContentLength { + doLog(LEVEL_INFO, "Checkpoint file is invalid, size was changed. clear the record.") + return false + } + stat, err := os.Stat(dfc.TempFileInfo.TempFileUrl) + if err != nil || stat.Size() != dfc.ObjectInfo.Size { + doLog(LEVEL_INFO, "Checkpoint file is invalid, the temp download file was changed. 
clear the record.") + return false + } + + return true +} + +type downloadPartTask struct { + GetObjectInput + obsClient *ObsClient + extensions []extensionOptions + abort *int32 + partNumber int64 + tempFileURL string + enableCheckpoint bool +} + +func (task *downloadPartTask) Run() interface{} { + if atomic.LoadInt32(task.abort) == 1 { + return errAbort + } + getObjectInput := &GetObjectInput{} + getObjectInput.GetObjectMetadataInput = task.GetObjectMetadataInput + getObjectInput.IfMatch = task.IfMatch + getObjectInput.IfNoneMatch = task.IfNoneMatch + getObjectInput.IfModifiedSince = task.IfModifiedSince + getObjectInput.IfUnmodifiedSince = task.IfUnmodifiedSince + getObjectInput.RangeStart = task.RangeStart + getObjectInput.RangeEnd = task.RangeEnd + + var output *GetObjectOutput + var err error + if task.extensions != nil { + output, err = task.obsClient.GetObject(getObjectInput, task.extensions...) + } else { + output, err = task.obsClient.GetObject(getObjectInput) + } + + if err == nil { + defer func() { + errMsg := output.Body.Close() + if errMsg != nil { + doLog(LEVEL_WARN, "Failed to close response body.") + } + }() + _err := updateDownloadFile(task.tempFileURL, task.RangeStart, output) + if _err != nil { + if !task.enableCheckpoint { + atomic.CompareAndSwapInt32(task.abort, 0, 1) + doLog(LEVEL_WARN, "Task is aborted, part number is [%d]", task.partNumber) + } + return _err + } + return output + } else if obsError, ok := err.(ObsError); ok && obsError.StatusCode >= 400 && obsError.StatusCode < 500 { + atomic.CompareAndSwapInt32(task.abort, 0, 1) + doLog(LEVEL_WARN, "Task is aborted, part number is [%d]", task.partNumber) + } + return err +} + +func getObjectInfo(input *DownloadFileInput, obsClient *ObsClient, extensions []extensionOptions) (getObjectmetaOutput *GetObjectMetadataOutput, err error) { + if extensions != nil { + getObjectmetaOutput, err = obsClient.GetObjectMetadata(&input.GetObjectMetadataInput, extensions...) 
+ } else { + getObjectmetaOutput, err = obsClient.GetObjectMetadata(&input.GetObjectMetadataInput) + } + + return +} + +func getDownloadCheckpointFile(dfc *DownloadCheckpoint, input *DownloadFileInput, output *GetObjectMetadataOutput) (needCheckpoint bool, err error) { + checkpointFilePath := input.CheckpointFile + checkpointFileStat, err := os.Stat(checkpointFilePath) + if err != nil { + doLog(LEVEL_DEBUG, fmt.Sprintf("Stat checkpoint file failed with error: [%v].", err)) + return true, nil + } + if checkpointFileStat.IsDir() { + doLog(LEVEL_ERROR, "Checkpoint file can not be a folder.") + return false, errors.New("checkpoint file can not be a folder") + } + err = loadCheckpointFile(checkpointFilePath, dfc) + if err != nil { + doLog(LEVEL_WARN, fmt.Sprintf("Load checkpoint file failed with error: [%v].", err)) + return true, nil + } else if !dfc.isValid(input, output) { + if dfc.TempFileInfo.TempFileUrl != "" { + _err := os.Remove(dfc.TempFileInfo.TempFileUrl) + if _err != nil { + doLog(LEVEL_WARN, "Failed to remove temp download file with error [%v].", _err) + } + } + _err := os.Remove(checkpointFilePath) + if _err != nil { + doLog(LEVEL_WARN, "Failed to remove checkpoint file with error [%v].", _err) + } + } else { + return false, nil + } + + return true, nil +} + +func sliceObject(objectSize, partSize int64, dfc *DownloadCheckpoint) { + cnt := objectSize / partSize + if objectSize%partSize > 0 { + cnt++ + } + + if cnt == 0 { + downloadPart := DownloadPartInfo{} + downloadPart.PartNumber = 1 + dfc.DownloadParts = []DownloadPartInfo{downloadPart} + } else { + downloadParts := make([]DownloadPartInfo, 0, cnt) + var i int64 + for i = 0; i < cnt; i++ { + downloadPart := DownloadPartInfo{} + downloadPart.PartNumber = i + 1 + downloadPart.Offset = i * partSize + downloadPart.RangeEnd = (i+1)*partSize - 1 + downloadParts = append(downloadParts, downloadPart) + } + dfc.DownloadParts = downloadParts + if value := objectSize % partSize; value > 0 { + dfc.DownloadParts[cnt-1].RangeEnd = dfc.ObjectInfo.Size - 1 + } + } +} + +func createFile(tempFileURL string, fileSize int64) error { + fd, err := syscall.Open(tempFileURL, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + doLog(LEVEL_WARN, "Failed to open temp download file [%s].", tempFileURL) + return err + } + defer func() { + errMsg := syscall.Close(fd) + if errMsg != nil { + doLog(LEVEL_WARN, "Failed to close file with error [%v].", errMsg) + } + }() + err = syscall.Ftruncate(fd, fileSize) + if err != nil { + doLog(LEVEL_WARN, "Failed to create file with error [%v].", err) + } + return err +} + +func prepareTempFile(tempFileURL string, fileSize int64) error { + parentDir := filepath.Dir(tempFileURL) + stat, err := os.Stat(parentDir) + if err != nil { + doLog(LEVEL_DEBUG, "Failed to stat path with error [%v].", err) + _err := os.MkdirAll(parentDir, os.ModePerm) + if _err != nil { + doLog(LEVEL_ERROR, "Failed to make dir with error [%v].", _err) + return _err + } + } else if !stat.IsDir() { + doLog(LEVEL_ERROR, "Cannot create folder [%s] due to a same file exists.", parentDir) + return fmt.Errorf("cannot create folder [%s] due to a same file exists", parentDir) + } + + err = createFile(tempFileURL, fileSize) + if err == nil { + return nil + } + fd, err := os.OpenFile(tempFileURL, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + doLog(LEVEL_ERROR, "Failed to open temp download file [%s].", tempFileURL) + return err + } + defer func() { + errMsg := fd.Close() + if errMsg != nil { + doLog(LEVEL_WARN, "Failed to close file 
with error [%v].", errMsg) + } + }() + if fileSize > 0 { + _, err = fd.WriteAt([]byte("a"), fileSize-1) + if err != nil { + doLog(LEVEL_ERROR, "Failed to create temp download file with error [%v].", err) + return err + } + } + + return nil +} + +func handleDownloadFileResult(tempFileURL string, enableCheckpoint bool, downloadFileError error) error { + if downloadFileError != nil { + if !enableCheckpoint { + _err := os.Remove(tempFileURL) + if _err != nil { + doLog(LEVEL_WARN, "Failed to remove temp download file with error [%v].", _err) + } + } + return downloadFileError + } + return nil +} + +func (obsClient ObsClient) resumeDownload(input *DownloadFileInput, extensions []extensionOptions) (output *GetObjectMetadataOutput, err error) { + getObjectmetaOutput, err := getObjectInfo(input, &obsClient, extensions) + if err != nil { + return nil, err + } + + objectSize := getObjectmetaOutput.ContentLength + partSize := input.PartSize + dfc := &DownloadCheckpoint{} + + var needCheckpoint = true + var checkpointFilePath = input.CheckpointFile + var enableCheckpoint = input.EnableCheckpoint + if enableCheckpoint { + needCheckpoint, err = getDownloadCheckpointFile(dfc, input, getObjectmetaOutput) + if err != nil { + return nil, err + } + } + + if needCheckpoint { + dfc.Bucket = input.Bucket + dfc.Key = input.Key + dfc.VersionId = input.VersionId + dfc.DownloadFile = input.DownloadFile + dfc.ObjectInfo = ObjectInfo{} + dfc.ObjectInfo.LastModified = getObjectmetaOutput.LastModified.Unix() + dfc.ObjectInfo.Size = getObjectmetaOutput.ContentLength + dfc.ObjectInfo.ETag = getObjectmetaOutput.ETag + dfc.TempFileInfo = TempFileInfo{} + dfc.TempFileInfo.TempFileUrl = input.DownloadFile + ".tmp" + dfc.TempFileInfo.Size = getObjectmetaOutput.ContentLength + + sliceObject(objectSize, partSize, dfc) + _err := prepareTempFile(dfc.TempFileInfo.TempFileUrl, dfc.TempFileInfo.Size) + if _err != nil { + return nil, _err + } + + if enableCheckpoint { + _err := updateCheckpointFile(dfc, checkpointFilePath) + if _err != nil { + doLog(LEVEL_ERROR, "Failed to update checkpoint file with error [%v].", _err) + _errMsg := os.Remove(dfc.TempFileInfo.TempFileUrl) + if _errMsg != nil { + doLog(LEVEL_WARN, "Failed to remove temp download file with error [%v].", _errMsg) + } + return nil, _err + } + } + } + + downloadFileError := obsClient.downloadFileConcurrent(input, dfc, extensions) + err = handleDownloadFileResult(dfc.TempFileInfo.TempFileUrl, enableCheckpoint, downloadFileError) + if err != nil { + return nil, err + } + + err = os.Rename(dfc.TempFileInfo.TempFileUrl, input.DownloadFile) + if err != nil { + doLog(LEVEL_ERROR, "Failed to rename temp download file [%s] to download file [%s] with error [%v].", dfc.TempFileInfo.TempFileUrl, input.DownloadFile, err) + return nil, err + } + if enableCheckpoint { + err = os.Remove(checkpointFilePath) + if err != nil { + doLog(LEVEL_WARN, "Download file successfully, but remove checkpoint file failed with error [%v].", err) + } + } + + return getObjectmetaOutput, nil +} + +func updateDownloadFile(filePath string, rangeStart int64, output *GetObjectOutput) error { + fd, err := os.OpenFile(filePath, os.O_WRONLY, 0666) + if err != nil { + doLog(LEVEL_ERROR, "Failed to open file [%s].", filePath) + return err + } + defer func() { + errMsg := fd.Close() + if errMsg != nil { + doLog(LEVEL_WARN, "Failed to close file with error [%v].", errMsg) + } + }() + _, err = fd.Seek(rangeStart, 0) + if err != nil { + doLog(LEVEL_ERROR, "Failed to seek file with error [%v].", err) + return err + } + 
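// stream the ranged response body into the temp file: read 8 KB at a time and + // write through a 64 KB buffered writer to reduce write syscalls +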
fileWriter := bufio.NewWriterSize(fd, 65536) + part := make([]byte, 8192) + var readErr error + var readCount int + for { + readCount, readErr = output.Body.Read(part) + if readCount > 0 { + wcnt, werr := fileWriter.Write(part[0:readCount]) + if werr != nil { + doLog(LEVEL_ERROR, "Failed to write to file with error [%v].", werr) + return werr + } + if wcnt != readCount { + doLog(LEVEL_ERROR, "Failed to write to file [%s], expect: [%d], actual: [%d]", filePath, readCount, wcnt) + return fmt.Errorf("Failed to write to file [%s], expect: [%d], actual: [%d]", filePath, readCount, wcnt) + } + } + if readErr != nil { + if readErr != io.EOF { + doLog(LEVEL_ERROR, "Failed to read response body with error [%v].", readErr) + return readErr + } + break + } + } + err = fileWriter.Flush() + if err != nil { + doLog(LEVEL_ERROR, "Failed to flush file with error [%v].", err) + return err + } + return nil +} + +func handleDownloadTaskResult(result interface{}, dfc *DownloadCheckpoint, partNum int64, enableCheckpoint bool, checkpointFile string, lock *sync.Mutex) (err error) { + if _, ok := result.(*GetObjectOutput); ok { + lock.Lock() + defer lock.Unlock() + dfc.DownloadParts[partNum-1].IsCompleted = true + if enableCheckpoint { + _err := updateCheckpointFile(dfc, checkpointFile) + if _err != nil { + doLog(LEVEL_WARN, "Failed to update checkpoint file with error [%v].", _err) + } + } + } else if result != errAbort { + if _err, ok := result.(error); ok { + err = _err + } + } + return +} + +func (obsClient ObsClient) downloadFileConcurrent(input *DownloadFileInput, dfc *DownloadCheckpoint, extensions []extensionOptions) error { + pool := NewRoutinePool(input.TaskNum, MAX_PART_NUM) + var downloadPartError atomic.Value + var errFlag int32 + var abort int32 + lock := new(sync.Mutex) + for _, downloadPart := range dfc.DownloadParts { + if atomic.LoadInt32(&abort) == 1 { + break + } + if downloadPart.IsCompleted { + continue + } + task := downloadPartTask{ + GetObjectInput: GetObjectInput{ + GetObjectMetadataInput: input.GetObjectMetadataInput, + IfMatch: input.IfMatch, + IfNoneMatch: input.IfNoneMatch, + IfUnmodifiedSince: input.IfUnmodifiedSince, + IfModifiedSince: input.IfModifiedSince, + RangeStart: downloadPart.Offset, + RangeEnd: downloadPart.RangeEnd, + }, + obsClient: &obsClient, + extensions: extensions, + abort: &abort, + partNumber: downloadPart.PartNumber, + tempFileURL: dfc.TempFileInfo.TempFileUrl, + enableCheckpoint: input.EnableCheckpoint, + } + pool.ExecuteFunc(func() interface{} { + result := task.Run() + err := handleDownloadTaskResult(result, dfc, task.partNumber, input.EnableCheckpoint, input.CheckpointFile, lock) + if err != nil && atomic.CompareAndSwapInt32(&errFlag, 0, 1) { + downloadPartError.Store(err) + } + return nil + }) + } + pool.ShutDown() + if err, ok := downloadPartError.Load().(error); ok { + return err + } + + return nil +} diff --git a/modules/obs/util.go b/modules/obs/util.go new file mode 100755 index 000000000..f3378dff9 --- /dev/null +++ b/modules/obs/util.go @@ -0,0 +1,536 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations under the License. + +//nolint:golint, unused +package obs + +import ( + "crypto/hmac" + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "encoding/json" + "encoding/xml" + "fmt" + "net/url" + "regexp" + "strconv" + "strings" + "time" +) + +var regex = regexp.MustCompile("^[\u4e00-\u9fa5]$") +var ipRegex = regexp.MustCompile("^((2[0-4]\\d|25[0-5]|[01]?\\d\\d?)\\.){3}(2[0-4]\\d|25[0-5]|[01]?\\d\\d?)$") +var v4AuthRegex = regexp.MustCompile("Credential=(.+?),SignedHeaders=(.+?),Signature=.+") +var regionRegex = regexp.MustCompile(".+/\\d+/(.+?)/.+") + +// StringContains replaces subStr in src with subTranscoding and returns the new string +func StringContains(src string, subStr string, subTranscoding string) string { + return strings.Replace(src, subStr, subTranscoding, -1) +} + +// XmlTranscoding replaces XML special characters with their escaped form +func XmlTranscoding(src string) string { + srcTmp := StringContains(src, "&", "&amp;") + srcTmp = StringContains(srcTmp, "<", "&lt;") + srcTmp = StringContains(srcTmp, ">", "&gt;") + srcTmp = StringContains(srcTmp, "'", "&apos;") + srcTmp = StringContains(srcTmp, "\"", "&quot;") + return srcTmp +} + +// StringToInt converts string value to int value with default value +func StringToInt(value string, def int) int { + ret, err := strconv.Atoi(value) + if err != nil { + ret = def + } + return ret +} + +// StringToInt64 converts string value to int64 value with default value +func StringToInt64(value string, def int64) int64 { + ret, err := strconv.ParseInt(value, 10, 64) + if err != nil { + ret = def + } + return ret +} + +// IntToString converts int value to string value +func IntToString(value int) string { + return strconv.Itoa(value) +} + +// Int64ToString converts int64 value to string value +func Int64ToString(value int64) string { + return strconv.FormatInt(value, 10) +} + +// GetCurrentTimestamp gets unix time in milliseconds +func GetCurrentTimestamp() int64 { + return time.Now().UnixNano() / 1000000 +} + +// FormatUtcNow gets a textual representation of the UTC format time value +func FormatUtcNow(format string) string { + return time.Now().UTC().Format(format) +} + +// FormatUtcToRfc1123 gets a textual representation of the RFC1123 format time value +func FormatUtcToRfc1123(t time.Time) string { + ret := t.UTC().Format(time.RFC1123) + return ret[:strings.LastIndex(ret, "UTC")] + "GMT" +} + +// Md5 gets the md5 value of input +func Md5(value []byte) []byte { + m := md5.New() + _, err := m.Write(value) + if err != nil { + doLog(LEVEL_WARN, "MD5 failed to write") + } + return m.Sum(nil) +} + +// HmacSha1 gets hmac sha1 value of input +func HmacSha1(key, value []byte) []byte { + mac := hmac.New(sha1.New, key) + _, err := mac.Write(value) + if err != nil { + doLog(LEVEL_WARN, "HmacSha1 failed to write") + } + return mac.Sum(nil) +} + +// HmacSha256 gets hmac sha256 value of input +func HmacSha256(key, value []byte) []byte { + mac := hmac.New(sha256.New, key) + _, err := mac.Write(value) + if err != nil { + doLog(LEVEL_WARN, "HmacSha256 failed to write") + } + return mac.Sum(nil) +} + +// Base64Encode wrapper of base64.StdEncoding.EncodeToString +func Base64Encode(value []byte) string { + return base64.StdEncoding.EncodeToString(value) +} + +// Base64Decode wrapper of base64.StdEncoding.DecodeString +func Base64Decode(value string) ([]byte, error) { + return base64.StdEncoding.DecodeString(value) +} + +// HexMd5 returns the md5 value of input in
hexadecimal format +func HexMd5(value []byte) string { + return Hex(Md5(value)) +} + +// Base64Md5 returns the md5 value of input with Base64Encode +func Base64Md5(value []byte) string { + return Base64Encode(Md5(value)) +} + +// Sha256Hash returns sha256 checksum +func Sha256Hash(value []byte) []byte { + hash := sha256.New() + _, err := hash.Write(value) + if err != nil { + doLog(LEVEL_WARN, "Sha256Hash failed to write") + } + return hash.Sum(nil) +} + +// ParseXml wrapper of xml.Unmarshal +func ParseXml(value []byte, result interface{}) error { + if len(value) == 0 { + return nil + } + return xml.Unmarshal(value, result) +} + +// parseJSON wrapper of json.Unmarshal +func parseJSON(value []byte, result interface{}) error { + if len(value) == 0 { + return nil + } + return json.Unmarshal(value, result) +} + +// TransToXml wrapper of xml.Marshal +func TransToXml(value interface{}) ([]byte, error) { + if value == nil { + return []byte{}, nil + } + return xml.Marshal(value) +} + +// Hex wrapper of hex.EncodeToString +func Hex(value []byte) string { + return hex.EncodeToString(value) +} + +// HexSha256 returns the Sha256Hash value of input in hexadecimal format +func HexSha256(value []byte) string { + return Hex(Sha256Hash(value)) +} + +// UrlDecode wrapper of url.QueryUnescape +func UrlDecode(value string) (string, error) { + ret, err := url.QueryUnescape(value) + if err == nil { + return ret, nil + } + return "", err +} + +// UrlDecodeWithoutError wrapper of UrlDecode +func UrlDecodeWithoutError(value string) string { + ret, err := UrlDecode(value) + if err == nil { + return ret + } + if isErrorLogEnabled() { + doLog(LEVEL_ERROR, "Url decode error") + } + return "" +} + +// IsIP checks whether the value matches ip address +func IsIP(value string) bool { + return ipRegex.MatchString(value) +} + +// UrlEncode encodes the input value +func UrlEncode(value string, chineseOnly bool) string { + if chineseOnly { + values := make([]string, 0, len(value)) + for _, val := range value { + _value := string(val) + if regex.MatchString(_value) { + _value = url.QueryEscape(_value) + } + values = append(values, _value) + } + return strings.Join(values, "") + } + return url.QueryEscape(value) +} + +func copyHeaders(m map[string][]string) (ret map[string][]string) { + if m != nil { + ret = make(map[string][]string, len(m)) + for key, values := range m { + _values := make([]string, 0, len(values)) + for _, value := range values { + _values = append(_values, value) + } + ret[strings.ToLower(key)] = _values + } + } else { + ret = make(map[string][]string) + } + + return +} + +func parseHeaders(headers map[string][]string) (signature string, region string, signedHeaders string) { + signature = "v2" + if receviedAuthorization, ok := headers[strings.ToLower(HEADER_AUTH_CAMEL)]; ok && len(receviedAuthorization) > 0 { + if strings.HasPrefix(receviedAuthorization[0], V4_HASH_PREFIX) { + signature = "v4" + matches := v4AuthRegex.FindStringSubmatch(receviedAuthorization[0]) + if len(matches) >= 3 { + region = matches[1] + regions := regionRegex.FindStringSubmatch(region) + if len(regions) >= 2 { + region = regions[1] + } + signedHeaders = matches[2] + } + + } else if strings.HasPrefix(receviedAuthorization[0], V2_HASH_PREFIX) { + signature = "v2" + } + } + return +} + +func getTemporaryKeys() []string { + return []string{ + "Signature", + "signature", + "X-Amz-Signature", + "x-amz-signature", + } +} + +func getIsObs(isTemporary bool, querys []string, headers map[string][]string) bool { + isObs := true + if isTemporary { 
+ for _, value := range querys { + keyPrefix := strings.ToLower(value) + if strings.HasPrefix(keyPrefix, HEADER_PREFIX) { + isObs = false + } else if strings.HasPrefix(value, HEADER_ACCESSS_KEY_AMZ) { + isObs = false + } + } + } else { + for key := range headers { + keyPrefix := strings.ToLower(key) + if strings.HasPrefix(keyPrefix, HEADER_PREFIX) { + isObs = false + break + } + } + } + return isObs +} + +func isPathStyle(headers map[string][]string, bucketName string) bool { + if receviedHost, ok := headers[HEADER_HOST]; ok && len(receviedHost) > 0 && !strings.HasPrefix(receviedHost[0], bucketName+".") { + return true + } + return false +} + +// GetV2Authorization v2 Authorization +func GetV2Authorization(ak, sk, method, bucketName, objectKey, queryURL string, headers map[string][]string) (ret map[string]string) { + + if strings.HasPrefix(queryURL, "?") { + queryURL = queryURL[1:] + } + + method = strings.ToUpper(method) + + querys := strings.Split(queryURL, "&") + querysResult := make([]string, 0) + for _, value := range querys { + if value != "=" && len(value) != 0 { + querysResult = append(querysResult, value) + } + } + params := make(map[string]string) + + for _, value := range querysResult { + kv := strings.Split(value, "=") + length := len(kv) + if length == 1 { + key := UrlDecodeWithoutError(kv[0]) + params[key] = "" + } else if length >= 2 { + key := UrlDecodeWithoutError(kv[0]) + vals := make([]string, 0, length-1) + for i := 1; i < length; i++ { + val := UrlDecodeWithoutError(kv[i]) + vals = append(vals, val) + } + params[key] = strings.Join(vals, "=") + } + } + headers = copyHeaders(headers) + pathStyle := isPathStyle(headers, bucketName) + conf := &config{securityProvider: &securityProvider{ak: ak, sk: sk}, + urlHolder: &urlHolder{scheme: "https", host: "dummy", port: 443}, + pathStyle: pathStyle} + conf.signature = SignatureObs + _, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false) + ret = v2Auth(ak, sk, method, canonicalizedURL, headers, true) + v2HashPrefix := OBS_HASH_PREFIX + ret[HEADER_AUTH_CAMEL] = fmt.Sprintf("%s %s:%s", v2HashPrefix, ak, ret["Signature"]) + return +} + +// GetAuthorization Authorization +func GetAuthorization(ak, sk, method, bucketName, objectKey, queryURL string, headers map[string][]string) (ret map[string]string) { + + if strings.HasPrefix(queryURL, "?") { + queryURL = queryURL[1:] + } + + method = strings.ToUpper(method) + + querys := strings.Split(queryURL, "&") + querysResult := make([]string, 0) + for _, value := range querys { + if value != "=" && len(value) != 0 { + querysResult = append(querysResult, value) + } + } + params := make(map[string]string) + + for _, value := range querysResult { + kv := strings.Split(value, "=") + length := len(kv) + if length == 1 { + key := UrlDecodeWithoutError(kv[0]) + params[key] = "" + } else if length >= 2 { + key := UrlDecodeWithoutError(kv[0]) + vals := make([]string, 0, length-1) + for i := 1; i < length; i++ { + val := UrlDecodeWithoutError(kv[i]) + vals = append(vals, val) + } + params[key] = strings.Join(vals, "=") + } + } + isTemporary := false + signature := "v2" + temporaryKeys := getTemporaryKeys() + for _, key := range temporaryKeys { + if _, ok := params[key]; ok { + isTemporary = true + if strings.ToLower(key) == "signature" { + signature = "v2" + } else if strings.ToLower(key) == "x-amz-signature" { + signature = "v4" + } + break + } + } + isObs := getIsObs(isTemporary, querysResult, headers) + headers = copyHeaders(headers) + pathStyle := false + if receviedHost, ok 
:= headers[HEADER_HOST]; ok && len(receviedHost) > 0 && !strings.HasPrefix(receviedHost[0], bucketName+".") { + pathStyle = true + } + conf := &config{securityProvider: &securityProvider{ak: ak, sk: sk}, + urlHolder: &urlHolder{scheme: "https", host: "dummy", port: 443}, + pathStyle: pathStyle} + + if isTemporary { + return getTemporaryAuthorization(ak, sk, method, bucketName, objectKey, signature, conf, params, headers, isObs) + } + signature, region, signedHeaders := parseHeaders(headers) + if signature == "v4" { + conf.signature = SignatureV4 + requestURL, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false) + parsedRequestURL, _err := url.Parse(requestURL) + if _err != nil { + doLog(LEVEL_WARN, "Failed to parse requestURL") + return nil + } + headerKeys := strings.Split(signedHeaders, ";") + _headers := make(map[string][]string, len(headerKeys)) + for _, headerKey := range headerKeys { + _headers[headerKey] = headers[headerKey] + } + ret = v4Auth(ak, sk, region, method, canonicalizedURL, parsedRequestURL.RawQuery, _headers) + ret[HEADER_AUTH_CAMEL] = fmt.Sprintf("%s Credential=%s,SignedHeaders=%s,Signature=%s", V4_HASH_PREFIX, ret["Credential"], ret["SignedHeaders"], ret["Signature"]) + } else if signature == "v2" { + if isObs { + conf.signature = SignatureObs + } else { + conf.signature = SignatureV2 + } + _, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false) + ret = v2Auth(ak, sk, method, canonicalizedURL, headers, isObs) + v2HashPrefix := V2_HASH_PREFIX + if isObs { + v2HashPrefix = OBS_HASH_PREFIX + } + ret[HEADER_AUTH_CAMEL] = fmt.Sprintf("%s %s:%s", v2HashPrefix, ak, ret["Signature"]) + } + return + +} + +func getTemporaryAuthorization(ak, sk, method, bucketName, objectKey, signature string, conf *config, params map[string]string, + headers map[string][]string, isObs bool) (ret map[string]string) { + + if signature == "v4" { + conf.signature = SignatureV4 + + longDate, ok := params[PARAM_DATE_AMZ_CAMEL] + if !ok { + longDate = params[HEADER_DATE_AMZ] + } + shortDate := longDate[:8] + + credential, ok := params[PARAM_CREDENTIAL_AMZ_CAMEL] + if !ok { + credential = params[strings.ToLower(PARAM_CREDENTIAL_AMZ_CAMEL)] + } + + _credential := UrlDecodeWithoutError(credential) + + regions := regionRegex.FindStringSubmatch(_credential) + var region string + if len(regions) >= 2 { + region = regions[1] + } + + _, scope := getCredential(ak, region, shortDate) + + expires, ok := params[PARAM_EXPIRES_AMZ_CAMEL] + if !ok { + expires = params[strings.ToLower(PARAM_EXPIRES_AMZ_CAMEL)] + } + + signedHeaders, ok := params[PARAM_SIGNEDHEADERS_AMZ_CAMEL] + if !ok { + signedHeaders = params[strings.ToLower(PARAM_SIGNEDHEADERS_AMZ_CAMEL)] + } + + algorithm, ok := params[PARAM_ALGORITHM_AMZ_CAMEL] + if !ok { + algorithm = params[strings.ToLower(PARAM_ALGORITHM_AMZ_CAMEL)] + } + + if _, ok := params[PARAM_SIGNATURE_AMZ_CAMEL]; ok { + delete(params, PARAM_SIGNATURE_AMZ_CAMEL) + } else if _, ok := params[strings.ToLower(PARAM_SIGNATURE_AMZ_CAMEL)]; ok { + delete(params, strings.ToLower(PARAM_SIGNATURE_AMZ_CAMEL)) + } + + ret = make(map[string]string, 6) + ret[PARAM_ALGORITHM_AMZ_CAMEL] = algorithm + ret[PARAM_CREDENTIAL_AMZ_CAMEL] = credential + ret[PARAM_DATE_AMZ_CAMEL] = longDate + ret[PARAM_EXPIRES_AMZ_CAMEL] = expires + ret[PARAM_SIGNEDHEADERS_AMZ_CAMEL] = signedHeaders + + requestURL, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false) + parsedRequestURL, _err := url.Parse(requestURL) + if _err != nil { + doLog(LEVEL_WARN, "Failed to 
parse requestUrl") + return nil + } + stringToSign := getV4StringToSign(method, canonicalizedURL, parsedRequestURL.RawQuery, scope, longDate, UNSIGNED_PAYLOAD, strings.Split(signedHeaders, ";"), headers) + ret[PARAM_SIGNATURE_AMZ_CAMEL] = UrlEncode(getSignature(stringToSign, sk, region, shortDate), false) + } else if signature == "v2" { + if isObs { + conf.signature = SignatureObs + } else { + conf.signature = SignatureV2 + } + _, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false) + expires, ok := params["Expires"] + if !ok { + expires = params["expires"] + } + headers[HEADER_DATE_CAMEL] = []string{expires} + stringToSign := getV2StringToSign(method, canonicalizedURL, headers, isObs) + ret = make(map[string]string, 3) + ret["Signature"] = UrlEncode(Base64Encode(HmacSha1([]byte(sk), []byte(stringToSign))), false) + ret["AWSAccessKeyId"] = UrlEncode(ak, false) + ret["Expires"] = UrlEncode(expires, false) + } + + return +} diff --git a/modules/setting/setting.go b/modules/setting/setting.go index c11c70ccc..3e71804ab 100755 --- a/modules/setting/setting.go +++ b/modules/setting/setting.go @@ -447,6 +447,24 @@ var ( //blockchain config BlockChainHost string CommitValidDate string + + //obs config + Endpoint string + AccessKeyID string + SecretAccessKey string + Bucket string + Location string + BasePath string + //RealPath string + + //modelarts config + ModelArtsHost string + IamHost string + ProjectID string + ProjectName string + ModelArtsUsername string + ModelArtsPassword string + ModelArtsDomain string ) // DateLang transforms standard language locale name to corresponding value in datetime plugin. @@ -1131,6 +1149,23 @@ func NewContext() { sec = Cfg.Section("blockchain") BlockChainHost = sec.Key("HOST").MustString("http://192.168.136.66:3302/") CommitValidDate = sec.Key("COMMIT_VALID_DATE").MustString("2021-01-15") + + sec = Cfg.Section("obs") + Endpoint = sec.Key("ENDPOINT").MustString("112.95.163.82") + AccessKeyID = sec.Key("ACCESS_KEY_ID").MustString("") + SecretAccessKey = sec.Key("SECRET_ACCESS_KEY").MustString("") + Bucket = sec.Key("BUCKET").MustString("testopendata") + Location = sec.Key("LOCATION").MustString("cn-south-222") + BasePath = sec.Key("BASE_PATH").MustString("attachment/") + + sec = Cfg.Section("modelarts") + ModelArtsHost = sec.Key("ENDPOINT").MustString("112.95.163.80") + IamHost = sec.Key("IAMHOST").MustString("112.95.163.80") + ProjectID = sec.Key("PROJECT_ID").MustString("") + ProjectName = sec.Key("PROJECT_NAME").MustString("") + ModelArtsUsername = sec.Key("USERNAME").MustString("") + ModelArtsPassword = sec.Key("PASSWORD").MustString("") + ModelArtsDomain = sec.Key("DOMAIN").MustString("cn-south-222") } func loadInternalToken(sec *ini.Section) string { diff --git a/modules/storage/obs.go b/modules/storage/obs.go new file mode 100755 index 000000000..d174b8fba --- /dev/null +++ b/modules/storage/obs.go @@ -0,0 +1,175 @@ +// Copyright 2020 The Gitea Authors. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
+
+package storage
+
+import (
+	"io"
+	"path"
+	"strconv"
+	"strings"
+
+	"code.gitea.io/gitea/modules/log"
+	"code.gitea.io/gitea/modules/obs"
+	"code.gitea.io/gitea/modules/setting"
+)
+
+// check if the object exists
+// todo: change the query method (ListObjects scans the whole bucket)
+func ObsHasObject(path string) (bool, error) {
+	hasObject := false
+	output, err := ObsCli.ListObjects(&obs.ListObjectsInput{Bucket: setting.Bucket})
+	if err != nil {
+		log.Error("ListObjects failed:%v", err)
+		return hasObject, err
+	}
+
+	for _, obj := range output.Contents {
+		//obj.Key: attachment/0/1/019fd24e-4ef7-41cc-9f85-4a7b8504d958
+		if path == obj.Key {
+			hasObject = true
+			break
+		}
+	}
+
+	return hasObject, nil
+}
+
+func GetObsPartInfos(uuid string, uploadID string) (string, error) {
+	key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/")
+
+	output, err := ObsCli.ListParts(&obs.ListPartsInput{
+		Bucket:   setting.Bucket,
+		Key:      key,
+		UploadId: uploadID,
+	})
+	if err != nil {
+		log.Error("ListParts failed:", err.Error())
+		return "", err
+	}
+
+	var chunks string
+	for _, partInfo := range output.Parts {
+		chunks += strconv.Itoa(partInfo.PartNumber) + "-" + partInfo.ETag + ","
+	}
+
+	return chunks, nil
+}
+
+func NewObsMultiPartUpload(uuid string) (string, error) {
+	input := &obs.InitiateMultipartUploadInput{}
+	input.Bucket = setting.Bucket
+	input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/")
+
+	output, err := ObsCli.InitiateMultipartUpload(input)
+	if err != nil {
+		log.Error("InitiateMultipartUpload failed:", err.Error())
+		return "", err
+	}
+
+	return output.UploadId, nil
+}
+
+func CompleteObsMultiPartUpload(uuid string, uploadID string) error {
+	input := &obs.CompleteMultipartUploadInput{}
+	input.Bucket = setting.Bucket
+	input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/")
+	input.UploadId = uploadID
+	output, err := ObsCli.ListParts(&obs.ListPartsInput{
+		Bucket:   setting.Bucket,
+		Key:      input.Key,
+		UploadId: uploadID,
+	})
+	if err != nil {
+		log.Error("ListParts failed:", err.Error())
+		return err
+	}
+
+	for _, partInfo := range output.Parts {
+		input.Parts = append(input.Parts, obs.Part{
+			PartNumber: partInfo.PartNumber,
+			ETag:       partInfo.ETag,
+		})
+	}
+
+	_, err = ObsCli.CompleteMultipartUpload(input)
+	if err != nil {
+		log.Error("CompleteMultipartUpload failed:", err.Error())
+		return err
+	}
+
+	return nil
+}
+
+func ObsUploadPart(uuid string, uploadId string, partNumber int, partSize int64, body io.Reader) (string, error) {
+	input := &obs.UploadPartInput{}
+	input.PartNumber = partNumber
+	input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/")
+	input.UploadId = uploadId
+	input.Bucket = setting.Bucket
+	input.PartSize = partSize
+	input.Body = body
+	output, err := ObsCli.UploadPart(input)
+	if err != nil {
+		log.Error("UploadPart failed:", err.Error())
+		return "", err
+	}
+
+	return output.ETag, nil
+}
+
+func ObsGenMultiPartSignedUrl(uuid string, uploadId string, partNumber int, partSize int64) (string, error) {
+	/*
+		input := &obs.CreateSignedUrlInput{}
+		input.Bucket = setting.Bucket
+		input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/")
+		input.Expires = int(PresignedUploadPartUrlExpireTime)
+		input.Method = obs.HTTP_PUT
+
+		input.QueryParams = map[string]string{
+			"Bucket":     input.Bucket,
+			"Key":        input.Key,
+			"PartNumber":
com.ToStr(partNumber,10), + "UploadId": uploadId, + "PartSize": com.ToStr(partSize,10), + } + + input.Headers = map[string]string{ + + } + + */ + + Key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/") + req, err := ObsCli.CreateUploadPartSignedUrl(setting.Bucket, Key, uploadId, partNumber, partSize) + if err != nil { + log.Error("CreateSignedUrl failed:", err.Error()) + return "", err + } + + log.Info(req.URL.String()) + log.Info("", req.Header) + + return req.URL.String(), nil + +} + +func ObsGetPreSignedUrl(uuid, fileName string) (string, error) { + input := &obs.CreateSignedUrlInput{} + input.Method = obs.HttpMethodGet + input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/") + input.Bucket = setting.Bucket + input.Expires = 60 * 60 + + reqParams := make(map[string]string) + reqParams["response-content-disposition"] = "attachment; filename=\"" + fileName + "\"" + input.QueryParams = reqParams + output, err := ObsCli.CreateSignedUrl(input) + if err != nil { + log.Error("CreateSignedUrl failed:", err.Error()) + return "", err + } + + return output.SignedUrl, nil +} diff --git a/modules/storage/storage.go b/modules/storage/storage.go old mode 100644 new mode 100755 index d06ec7208..abf9e6e32 --- a/modules/storage/storage.go +++ b/modules/storage/storage.go @@ -8,6 +8,8 @@ import ( "fmt" "io" + "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/obs" "code.gitea.io/gitea/modules/setting" ) @@ -40,6 +42,7 @@ func Copy(dstStorage ObjectStorage, dstPath string, srcStorage ObjectStorage, sr var ( // Attachments represents attachments storage Attachments ObjectStorage + ObsCli *obs.ObsClient ) // Init init the stoarge @@ -63,6 +66,12 @@ func Init() error { return fmt.Errorf("Unsupported attachment store type: %s", setting.Attachment.StoreType) } + ObsCli, err = obs.New(setting.AccessKeyID, setting.SecretAccessKey, setting.Endpoint) + if err != nil { + log.Error("obs.New failed:", err) + return err + } + if err != nil { return err } diff --git a/options/locale/locale_zh-CN.ini b/options/locale/locale_zh-CN.ini index 8df65bd2a..fec135a7f 100755 --- a/options/locale/locale_zh-CN.ini +++ b/options/locale/locale_zh-CN.ini @@ -756,7 +756,11 @@ cloudbrain.commit_image=提交 balance=余额 balance.total_view=余额总览 balance.available=可用余额: -balance.disable=不可用余额: +cloudbrain1=云脑1 +cloudbrain2=云脑2 +cloudbrain_selection=云脑选择 +cloudbrain_platform_selection=选择您准备使用的云脑平台: +confirm_choice=确定 template.items=模板选项 template.git_content=Git数据(默认分支) @@ -2439,6 +2443,7 @@ file_status=文件处理状态: file_init_status=等待上传 waitting_uploading=请等待文件传输完成 md5_computing=计算MD5 +obs-connecting=obs连接中 loading_file=加载文件 uploading=正在上传 upload_complete=上传完成 diff --git a/package-lock.json b/package-lock.json index 42d6926af..42cd4ec4d 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1922,9 +1922,16 @@ "axios": { "version": "0.21.1", "resolved": "https://registry.npmjs.org/axios/-/axios-0.21.1.tgz", - "integrity": "sha512-fjgm5MvRHLhx+osE2xoekY70AhARk3a6hkN+3Io1jc00jtquGvxYlKlsFUhmUET0V5te6CcZI7lcv2Ym61mjHA==", + "integrity": "sha512-dKQiRHxGD9PPRIUNIWvZhPTPpl1rf/OxTYKsqKUDjBwYylTvV7SjSHJb9ratfyzM6wCdLCOYLzs73qpg5c4iGA==", "requires": { - "follow-redirects": "1.5.10" + "follow-redirects": "^1.10.0" + }, + "dependencies": { + "follow-redirects": { + "version": "1.13.2", + "resolved": 
"https://registry.npm.taobao.org/follow-redirects/download/follow-redirects-1.13.2.tgz?cache=0&sync_timestamp=1611606737937&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Ffollow-redirects%2Fdownload%2Ffollow-redirects-1.13.2.tgz", + "integrity": "sha1-3XPI7/wScoulz0JZ12DqX7g+MUc=" + } } }, "babel-loader": { @@ -1947,6 +1954,28 @@ "object.assign": "^4.1.0" } }, + "babel-polyfill": { + "version": "6.26.0", + "resolved": "https://registry.npm.taobao.org/babel-polyfill/download/babel-polyfill-6.26.0.tgz", + "integrity": "sha1-N5k3q8Z9eJWXCtxiHyhM2WbPIVM=", + "requires": { + "babel-runtime": "^6.26.0", + "core-js": "^2.5.0", + "regenerator-runtime": "^0.10.5" + }, + "dependencies": { + "core-js": { + "version": "2.6.12", + "resolved": "https://registry.npm.taobao.org/core-js/download/core-js-2.6.12.tgz?cache=0&sync_timestamp=1611040749668&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fcore-js%2Fdownload%2Fcore-js-2.6.12.tgz", + "integrity": "sha1-2TM9+nsGXjR8xWgiGdb2kIWcwuw=" + }, + "regenerator-runtime": { + "version": "0.10.5", + "resolved": "https://registry.npm.taobao.org/regenerator-runtime/download/regenerator-runtime-0.10.5.tgz", + "integrity": "sha1-M2w+/BIgrc7dosn6tntaeVWjNlg=" + } + } + }, "babel-runtime": { "version": "6.26.0", "resolved": "https://registry.npmjs.org/babel-runtime/-/babel-runtime-6.26.0.tgz", @@ -2136,6 +2165,11 @@ "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz", "integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==" }, + "blueimp-md5": { + "version": "2.18.0", + "resolved": "https://registry.npm.taobao.org/blueimp-md5/download/blueimp-md5-2.18.0.tgz", + "integrity": "sha1-EVK+EzXwxrORHtnjbbVPPmrFKTU=" + }, "bn.js": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-5.1.1.tgz", @@ -3409,6 +3443,11 @@ "assert-plus": "^1.0.0" } }, + "date-format": { + "version": "3.0.0", + "resolved": "https://registry.npm.taobao.org/date-format/download/date-format-3.0.0.tgz", + "integrity": "sha1-64eANlx9KxURB4+0keZHl4DzrZU=" + }, "dateformat": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/dateformat/-/dateformat-2.2.0.tgz", @@ -4020,6 +4059,38 @@ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=" }, + "esdk-obs-browserjs": { + "version": "3.20.7", + "resolved": "https://registry.npm.taobao.org/esdk-obs-browserjs/download/esdk-obs-browserjs-3.20.7.tgz", + "integrity": "sha1-vhziRlKEhW3PgZPl0DyX68bJI0s=", + "requires": { + "axios": "^0.19.0", + "babel-polyfill": "^6.26.0", + "blueimp-md5": "^2.10.0", + "js-base64": "^2.3.2", + "jssha": "^2.3.1", + "urijs": "^1.19.1" + }, + "dependencies": { + "axios": { + "version": "0.19.2", + "resolved": "https://registry.npm.taobao.org/axios/download/axios-0.19.2.tgz?cache=0&sync_timestamp=1608609215811&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Faxios%2Fdownload%2Faxios-0.19.2.tgz", + "integrity": "sha1-PqNsXYgY0NX4qKl6bTa4bNwAyyc=", + "requires": { + "follow-redirects": "1.5.10" + } + } + } + }, + "esdk-obs-nodejs": { + "version": "3.20.11", + "resolved": "https://registry.npm.taobao.org/esdk-obs-nodejs/download/esdk-obs-nodejs-3.20.11.tgz?cache=0&sync_timestamp=1610351636380&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fesdk-obs-nodejs%2Fdownload%2Fesdk-obs-nodejs-3.20.11.tgz", + "integrity": "sha1-/bMuzu3qoT+xLgmCcgg8yM6MIsE=", + "requires": { + "log4js": "^6.3.0", + "xml2js": "^0.4.23" + } + }, "eslint": { 
"version": "6.8.0", "resolved": "https://registry.npm.taobao.org/eslint/download/eslint-6.8.0.tgz", @@ -5195,8 +5266,7 @@ "flatted": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/flatted/-/flatted-2.0.2.tgz", - "integrity": "sha512-r5wGx7YeOwNWNlCA0wQ86zKyDLMQr+/RB8xy74M4hTphfmjlijTSSXGuH8rnvKZnfT9i+75zmd8jcKdMR4O6jA==", - "dev": true + "integrity": "sha512-r5wGx7YeOwNWNlCA0wQ86zKyDLMQr+/RB8xy74M4hTphfmjlijTSSXGuH8rnvKZnfT9i+75zmd8jcKdMR4O6jA==" }, "flatten": { "version": "1.0.3", @@ -5214,23 +5284,23 @@ }, "follow-redirects": { "version": "1.5.10", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.5.10.tgz", - "integrity": "sha512-0V5l4Cizzvqt5D44aTXbFZz+FtyXV1vrDN6qrelxtfYQKW0KO0W2T/hkE8xvGa/540LkZlkaUjO4ailYTFtHVQ==", + "resolved": "https://registry.npm.taobao.org/follow-redirects/download/follow-redirects-1.5.10.tgz?cache=0&sync_timestamp=1611606737937&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Ffollow-redirects%2Fdownload%2Ffollow-redirects-1.5.10.tgz", + "integrity": "sha1-e3qfmuov3/NnhqlP9kPtB/T/Xio=", "requires": { "debug": "=3.1.0" }, "dependencies": { "debug": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz", - "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==", + "resolved": "https://registry.npm.taobao.org/debug/download/debug-3.1.0.tgz?cache=0&sync_timestamp=1607566533140&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fdebug%2Fdownload%2Fdebug-3.1.0.tgz", + "integrity": "sha1-W7WgZyYotkFJVmuhaBnmFRjGcmE=", "requires": { "ms": "2.0.0" } }, "ms": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "resolved": "https://registry.npm.taobao.org/ms/download/ms-2.0.0.tgz?cache=0&sync_timestamp=1607433842694&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fms%2Fdownload%2Fms-2.0.0.tgz", "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" } } @@ -5367,6 +5437,21 @@ "readable-stream": "^2.0.0" } }, + "fs": { + "version": "0.0.1-security", + "resolved": "https://registry.npm.taobao.org/fs/download/fs-0.0.1-security.tgz", + "integrity": "sha1-invTcYa23d84E/I4WLV+yq9eQdQ=" + }, + "fs-extra": { + "version": "8.1.0", + "resolved": "https://registry.npm.taobao.org/fs-extra/download/fs-extra-8.1.0.tgz?cache=0&sync_timestamp=1611075469998&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Ffs-extra%2Fdownload%2Ffs-extra-8.1.0.tgz", + "integrity": "sha1-SdQ8RaiM2Wd2aMt74bRu/bjS4cA=", + "requires": { + "graceful-fs": "^4.2.0", + "jsonfile": "^4.0.0", + "universalify": "^0.1.0" + } + }, "fs-minipass": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", @@ -7700,6 +7785,14 @@ "minimist": "^1.2.5" } }, + "jsonfile": { + "version": "4.0.0", + "resolved": "https://registry.npm.taobao.org/jsonfile/download/jsonfile-4.0.0.tgz?cache=0&sync_timestamp=1604161797011&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fjsonfile%2Fdownload%2Fjsonfile-4.0.0.tgz", + "integrity": "sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss=", + "requires": { + "graceful-fs": "^4.1.6" + } + }, "jsprim": { "version": "1.4.1", "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz", @@ -7712,6 +7805,11 @@ "verror": "1.10.0" } }, + "jssha": { + "version": "2.4.2", + "resolved": "https://registry.npm.taobao.org/jssha/download/jssha-2.4.2.tgz", + "integrity": "sha1-2VCwlWNJKL1rK9odQtqaOnYtZek=" + }, "just-debounce": { "version": "1.0.0", "resolved": 
"https://registry.npmjs.org/just-debounce/-/just-debounce-1.0.0.tgz", @@ -8184,6 +8282,18 @@ "chalk": "^2.4.2" } }, + "log4js": { + "version": "6.3.0", + "resolved": "https://registry.npm.taobao.org/log4js/download/log4js-6.3.0.tgz", + "integrity": "sha1-EN+vu0NDUaPjAnegC5h5RG9xW8s=", + "requires": { + "date-format": "^3.0.0", + "debug": "^4.1.1", + "flatted": "^2.0.1", + "rfdc": "^1.1.4", + "streamroller": "^2.2.4" + } + }, "longest": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/longest/-/longest-1.0.1.tgz", @@ -11821,6 +11931,11 @@ "resolved": "https://registry.npmjs.org/rework-visit/-/rework-visit-1.0.0.tgz", "integrity": "sha1-mUWygD8hni96ygCtuLyfZA+ELJo=" }, + "rfdc": { + "version": "1.2.0", + "resolved": "https://registry.npm.taobao.org/rfdc/download/rfdc-1.2.0.tgz?cache=0&sync_timestamp=1610744108114&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Frfdc%2Fdownload%2Frfdc-1.2.0.tgz", + "integrity": "sha1-npiUJY9I8oS0PDFDxoBwpPNzuUk=" + }, "rgb-regex": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/rgb-regex/-/rgb-regex-1.0.1.tgz", @@ -12428,6 +12543,23 @@ "resolved": "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.1.tgz", "integrity": "sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ==" }, + "streamroller": { + "version": "2.2.4", + "resolved": "https://registry.npm.taobao.org/streamroller/download/streamroller-2.2.4.tgz", + "integrity": "sha1-wZjO1C25QIamGTYIGHzoCl8rDlM=", + "requires": { + "date-format": "^2.1.0", + "debug": "^4.1.1", + "fs-extra": "^8.1.0" + }, + "dependencies": { + "date-format": { + "version": "2.1.0", + "resolved": "https://registry.npm.taobao.org/date-format/download/date-format-2.1.0.tgz", + "integrity": "sha1-MdW16iEc9f12TNOLr50DPffhJc8=" + } + } + }, "strict-uri-encode": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz", @@ -13983,6 +14115,11 @@ "os-name": "^3.1.0" } }, + "universalify": { + "version": "0.1.2", + "resolved": "https://registry.npm.taobao.org/universalify/download/universalify-0.1.2.tgz?cache=0&sync_timestamp=1603179967633&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Funiversalify%2Fdownload%2Funiversalify-0.1.2.tgz", + "integrity": "sha1-tkb2m+OULavOzJ1mOcgNwQXvqmY=" + }, "unquote": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/unquote/-/unquote-1.1.1.tgz", @@ -14053,6 +14190,11 @@ "punycode": "^2.1.0" } }, + "urijs": { + "version": "1.19.5", + "resolved": "https://registry.npm.taobao.org/urijs/download/urijs-1.19.5.tgz", + "integrity": "sha1-EZaDq0svsL1jfl6m3ZEXvKxo0+Q=" + }, "urix": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/urix/-/urix-0.1.0.tgz", @@ -15035,6 +15177,20 @@ "repeat-string": "^1.5.2" } }, + "xml2js": { + "version": "0.4.23", + "resolved": "https://registry.npm.taobao.org/xml2js/download/xml2js-0.4.23.tgz?cache=0&sync_timestamp=1599054229598&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fxml2js%2Fdownload%2Fxml2js-0.4.23.tgz", + "integrity": "sha1-oMaVFnUkIesqx1juTUzPWIQ+rGY=", + "requires": { + "sax": ">=0.6.0", + "xmlbuilder": "~11.0.0" + } + }, + "xmlbuilder": { + "version": "11.0.1", + "resolved": "https://registry.npm.taobao.org/xmlbuilder/download/xmlbuilder-11.0.1.tgz", + "integrity": "sha1-vpuuHIoEbnazESdyY0fQrXACvrM=" + }, "xtend": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", diff --git a/package.json b/package.json index 77e28bd06..ec165423a 100644 --- a/package.json +++ 
b/package.json @@ -19,9 +19,12 @@ "cssnano": "4.1.10", "domino": "2.1.5", "dropzone": "5.7.2", + "esdk-obs-browserjs": "3.20.7", + "esdk-obs-nodejs": "3.20.11", "fast-glob": "3.2.2", "file-loader": "6.0.0", "fomantic-ui": "2.8.4", + "fs": "0.0.1-security", "highlight.js": "10.0.3", "imports-loader": "0.8.0", "jquery": "3.5.1", diff --git a/routers/api/v1/api.go b/routers/api/v1/api.go old mode 100644 new mode 100755 index 064540a8c..bf4d6d464 --- a/routers/api/v1/api.go +++ b/routers/api/v1/api.go @@ -852,6 +852,9 @@ func RegisterRoutes(m *macaron.Macaron) { m.Group("/cloudbrain", func() { m.Get("/:jobid", repo.GetCloudbrainTask) }, reqRepoReader(models.UnitTypeCloudBrain)) + m.Group("/modelarts", func() { + m.Get("/:jobid", repo.GetModelArtsTask) + }, reqRepoReader(models.UnitTypeCloudBrain)) }, repoAssignment()) }) diff --git a/routers/api/v1/repo/modelarts.go b/routers/api/v1/repo/modelarts.go new file mode 100755 index 000000000..1b58a2fab --- /dev/null +++ b/routers/api/v1/repo/modelarts.go @@ -0,0 +1,45 @@ +// Copyright 2016 The Gogs Authors. All rights reserved. +// Copyright 2018 The Gitea Authors. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package repo + +import ( + "code.gitea.io/gitea/models" + "code.gitea.io/gitea/modules/context" + "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/modelarts" + "net/http" +) + +func GetModelArtsTask(ctx *context.APIContext) { + var ( + err error + ) + + jobID := ctx.Params(":jobid") + repoID := ctx.Repo.Repository.ID + job, err := models.GetRepoCloudBrainByJobID(repoID, jobID) + if err != nil { + ctx.NotFound(err) + return + } + result, err := modelarts.GetJob(jobID) + if err != nil { + ctx.NotFound(err) + return + } + + job.Status = result.Status + err = models.UpdateJob(job) + if err != nil { + log.Error("UpdateJob failed:", err) + } + + ctx.JSON(http.StatusOK, map[string]interface{}{ + "JobID": jobID, + "JobStatus": result.Status, + }) + +} diff --git a/routers/repo/attachment.go b/routers/repo/attachment.go index 0258a5373..6874a1eda 100755 --- a/routers/repo/attachment.go +++ b/routers/repo/attachment.go @@ -5,6 +5,16 @@ package repo import ( + contexExt "context" + "encoding/json" + "errors" + "fmt" + "mime/multipart" + "net/http" + "path" + "strconv" + "strings" + "code.gitea.io/gitea/models" "code.gitea.io/gitea/modules/context" "code.gitea.io/gitea/modules/log" @@ -13,12 +23,6 @@ import ( "code.gitea.io/gitea/modules/storage" "code.gitea.io/gitea/modules/upload" "code.gitea.io/gitea/modules/worker" - contexExt "context" - "encoding/json" - "fmt" - "net/http" - "strconv" - "strings" gouuid "github.com/satori/go.uuid" ) @@ -37,6 +41,15 @@ type CloudBrainDataset struct { CreateTime string `json:"created_at"` } +type UploadForm struct { + UploadID string `form:"uploadId"` + UuID string `form:"uuid"` + PartSize int64 `form:"size"` + Offset int64 `form:"offset"` + PartNumber int `form:"chunkNumber"` + PartFile multipart.File `form:"file"` +} + func RenderAttachmentSettings(ctx *context.Context) { renderAttachmentSettings(ctx) } @@ -130,6 +143,13 @@ func DeleteAttachment(ctx *context.Context) { // GetAttachment serve attachements func GetAttachment(ctx *context.Context) { + typeCloudBrain := ctx.QueryInt("type") + err := checkTypeCloudBrain(typeCloudBrain) + if err != nil { + ctx.ServerError("checkTypeCloudBrain failed", err) + return + } + attach, err := models.GetAttachmentByUUID(ctx.Params(":uuid")) if err != nil { if 
models.IsErrAttachmentNotExist(err) { @@ -183,19 +203,29 @@ func GetAttachment(ctx *context.Context) { //If we have matched and access to release or issue if setting.Attachment.StoreType == storage.MinioStorageType { - url, err := storage.Attachments.PresignedGetURL(attach.RelativePath(), attach.Name) - if err != nil { - ctx.ServerError("PresignedGetURL", err) - return + url := "" + if typeCloudBrain == models.TypeCloudBrainOne { + url, err = storage.Attachments.PresignedGetURL(attach.RelativePath(), attach.Name) + if err != nil { + ctx.ServerError("PresignedGetURL", err) + return + } + } else { + url, err = storage.ObsGetPreSignedUrl(attach.UUID, attach.Name) + if err != nil { + ctx.ServerError("ObsGetPreSignedUrl", err) + return + } } + log.Info(url) + if err = increaseDownloadCount(attach, dataSet); err != nil { ctx.ServerError("Update", err) return } http.Redirect(ctx.Resp, ctx.Req.Request, url, http.StatusMovedPermanently) - } else { fr, err := storage.Attachments.Open(attach.RelativePath()) if err != nil { @@ -263,13 +293,29 @@ func GetPresignedPutObjectURL(ctx *context.Context) { // AddAttachment response for add attachment record func AddAttachment(ctx *context.Context) { - uuid := ctx.Query("uuid") - has, err := storage.Attachments.HasObject(models.AttachmentRelativePath(uuid)) + typeCloudBrain := ctx.QueryInt("type") + err := checkTypeCloudBrain(typeCloudBrain) if err != nil { - ctx.ServerError("HasObject", err) + ctx.ServerError("checkTypeCloudBrain failed", err) return } + uuid := ctx.Query("uuid") + has := false + if typeCloudBrain == models.TypeCloudBrainOne { + has, err = storage.Attachments.HasObject(models.AttachmentRelativePath(uuid)) + if err != nil { + ctx.ServerError("HasObject", err) + return + } + } else { + has, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(uuid) + "/" + uuid) + if err != nil { + ctx.ServerError("ObsHasObject", err) + return + } + } + if !has { ctx.Error(404, "attachment has not been uploaded") return @@ -282,6 +328,7 @@ func AddAttachment(ctx *context.Context) { Name: ctx.Query("file_name"), Size: ctx.QueryInt64("size"), DatasetID: ctx.QueryInt64("dataset_id"), + Type: typeCloudBrain, }) if err != nil { @@ -291,16 +338,19 @@ func AddAttachment(ctx *context.Context) { if attachment.DatasetID != 0 { if strings.HasSuffix(attachment.Name, ".zip") { - err = worker.SendDecompressTask(contexExt.Background(), uuid) - if err != nil { - log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error()) - } else { - attachment.DecompressState = models.DecompressStateIng - err = models.UpdateAttachment(attachment) + if typeCloudBrain == models.TypeCloudBrainOne { + err = worker.SendDecompressTask(contexExt.Background(), uuid) if err != nil { - log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error()) + log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error()) + } else { + attachment.DecompressState = models.DecompressStateIng + err = models.UpdateAttachment(attachment) + if err != nil { + log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error()) + } } } + //todo:decompress type_two } } @@ -340,9 +390,16 @@ func UpdateAttachmentDecompressState(ctx *context.Context) { func GetSuccessChunks(ctx *context.Context) { fileMD5 := ctx.Query("md5") + typeCloudBrain := ctx.QueryInt("type") var chunks string - fileChunk, err := models.GetFileChunkByMD5AndUser(fileMD5, ctx.User.ID) + err := checkTypeCloudBrain(typeCloudBrain) + if err != nil { + ctx.ServerError("checkTypeCloudBrain failed", err) + return + } + + 
fileChunk, err := models.GetFileChunkByMD5AndUser(fileMD5, ctx.User.ID, typeCloudBrain) if err != nil { if models.IsErrFileChunkNotExist(err) { ctx.JSON(200, map[string]string{ @@ -357,12 +414,22 @@ func GetSuccessChunks(ctx *context.Context) { return } - isExist, err := storage.Attachments.HasObject(models.AttachmentRelativePath(fileChunk.UUID)) - if err != nil { - ctx.ServerError("HasObject failed", err) - return + isExist := false + if typeCloudBrain == models.TypeCloudBrainOne { + isExist, err = storage.Attachments.HasObject(models.AttachmentRelativePath(fileChunk.UUID)) + if err != nil { + ctx.ServerError("HasObject failed", err) + return + } + } else { + isExist, err = storage.ObsHasObject(models.AttachmentRelativePath(fileChunk.UUID)) + if err != nil { + ctx.ServerError("ObsHasObject failed", err) + return + } } + if isExist { if fileChunk.IsUploaded == models.FileNotUploaded { log.Info("the file has been uploaded but not recorded") @@ -380,10 +447,18 @@ func GetSuccessChunks(ctx *context.Context) { } } - chunks, err = storage.GetPartInfos(fileChunk.UUID, fileChunk.UploadID) - if err != nil { - ctx.ServerError("GetPartInfos failed", err) - return + if typeCloudBrain == models.TypeCloudBrainOne { + chunks, err = storage.GetPartInfos(fileChunk.UUID, fileChunk.UploadID) + if err != nil { + ctx.ServerError("GetPartInfos failed", err) + return + } + } else { + chunks, err = storage.GetObsPartInfos(fileChunk.UUID, fileChunk.UploadID) + if err != nil { + ctx.ServerError("GetObsPartInfos failed", err) + return + } } } @@ -445,6 +520,13 @@ func NewMultipart(ctx *context.Context) { return } + typeCloudBrain := ctx.QueryInt("type") + err = checkTypeCloudBrain(typeCloudBrain) + if err != nil { + ctx.ServerError("checkTypeCloudBrain failed", err) + return + } + if setting.Attachment.StoreType == storage.MinioStorageType { totalChunkCounts := ctx.QueryInt("totalChunkCounts") if totalChunkCounts > minio_ext.MaxPartsCount { @@ -459,10 +541,19 @@ func NewMultipart(ctx *context.Context) { } uuid := gouuid.NewV4().String() - uploadID, err := storage.NewMultiPartUpload(uuid) - if err != nil { - ctx.ServerError("NewMultipart", err) - return + var uploadID string + if typeCloudBrain == models.TypeCloudBrainOne { + uploadID, err = storage.NewMultiPartUpload(uuid) + if err != nil { + ctx.ServerError("NewMultipart", err) + return + } + } else { + uploadID, err = storage.NewObsMultiPartUpload(uuid) + if err != nil { + ctx.ServerError("NewObsMultiPartUpload", err) + return + } } _, err = models.InsertFileChunk(&models.FileChunk{ @@ -472,6 +563,7 @@ func NewMultipart(ctx *context.Context) { Md5: ctx.Query("md5"), Size: fileSize, TotalChunks: totalChunkCounts, + Type: typeCloudBrain, }) if err != nil { @@ -495,25 +587,94 @@ func GetMultipartUploadUrl(ctx *context.Context) { partNumber := ctx.QueryInt("chunkNumber") size := ctx.QueryInt64("size") - if size > minio_ext.MinPartSize { - ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size)) + typeCloudBrain := ctx.QueryInt("type") + err := checkTypeCloudBrain(typeCloudBrain) + if err != nil { + ctx.ServerError("checkTypeCloudBrain failed", err) return } - url, err := storage.GenMultiPartSignedUrl(uuid, uploadID, partNumber, size) + url := "" + if typeCloudBrain == models.TypeCloudBrainOne { + if size > minio_ext.MinPartSize { + ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size)) + return + } + + url, err = storage.GenMultiPartSignedUrl(uuid, uploadID, partNumber, size) + if err != nil { + ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: 
%v", err)) + return + } + } else { + url, err = storage.ObsGenMultiPartSignedUrl(uuid, uploadID, partNumber, size) + if err != nil { + ctx.Error(500, fmt.Sprintf("ObsGenMultiPartSignedUrl failed: %v", err)) + return + } + } + + ctx.JSON(200, map[string]string{ + "url": url, + }) +} + +func GetObsKey(ctx *context.Context) { + uuid := gouuid.NewV4().String() + key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/") + + ctx.JSON(200, map[string]string{ + "uuid": uuid, + "key": key, + "access_key_id": setting.AccessKeyID, + "secret_access_key": setting.SecretAccessKey, + "server": setting.Endpoint, + "bucket": setting.Bucket, + }) +} + +func UploadPart(ctx *context.Context) { + tmp, err := ctx.Req.Body().String() + log.Info(tmp) + + err = ctx.Req.ParseMultipartForm(100*1024*1024) + if err != nil { + ctx.Error(http.StatusBadRequest, fmt.Sprintf("ParseMultipartForm failed: %v", err)) + return + } + + file, fileHeader, err := ctx.Req.FormFile("file") + log.Info(ctx.Req.Form.Get("file")) + if err != nil { + ctx.Error(http.StatusBadRequest, fmt.Sprintf("FormFile failed: %v", err)) + return + } + + + + log.Info(fileHeader.Filename) + + etag, err := storage.ObsUploadPart("", "", 1, 1, file) if err != nil { - ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err)) + ctx.Error(500, fmt.Sprintf("ObsUploadPart failed: %v", err)) return } ctx.JSON(200, map[string]string{ - "url": url, + "etag": etag, }) } func CompleteMultipart(ctx *context.Context) { uuid := ctx.Query("uuid") uploadID := ctx.Query("uploadID") + typeCloudBrain := ctx.QueryInt("type") + + err := checkTypeCloudBrain(typeCloudBrain) + if err != nil { + ctx.ServerError("checkTypeCloudBrain failed", err) + return + } fileChunk, err := models.GetFileChunkByUUID(uuid) if err != nil { @@ -525,10 +686,18 @@ func CompleteMultipart(ctx *context.Context) { return } - _, err = storage.CompleteMultiPartUpload(uuid, uploadID) - if err != nil { - ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err)) - return + if typeCloudBrain == models.TypeCloudBrainOne { + _, err = storage.CompleteMultiPartUpload(uuid, uploadID) + if err != nil { + ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err)) + return + } + } else { + err = storage.CompleteObsMultiPartUpload(uuid, uploadID) + if err != nil { + ctx.Error(500, fmt.Sprintf("CompleteObsMultiPartUpload failed: %v", err)) + return + } } fileChunk.IsUploaded = models.FileUploaded @@ -546,6 +715,7 @@ func CompleteMultipart(ctx *context.Context) { Name: ctx.Query("file_name"), Size: ctx.QueryInt64("size"), DatasetID: ctx.QueryInt64("dataset_id"), + Type: typeCloudBrain, }) if err != nil { @@ -704,3 +874,11 @@ func queryDatasets(ctx *context.Context, attachs []*models.AttachmentUsername) { }) return } + +func checkTypeCloudBrain(typeCloudBrain int) error { + if typeCloudBrain != models.TypeCloudBrainOne && typeCloudBrain != models.TypeCloudBrainTwo { + log.Error("type error:", typeCloudBrain) + return errors.New("type error") + } + return nil +} diff --git a/routers/repo/cloudbrain.go b/routers/repo/cloudbrain.go index 72e36d841..3f60284a0 100755 --- a/routers/repo/cloudbrain.go +++ b/routers/repo/cloudbrain.go @@ -46,7 +46,7 @@ func CloudBrainIndex(ctx *context.Context) { PageSize: setting.UI.IssuePagingNum, }, RepoID: repo.ID, - // SortType: sortType, + Type: models.TypeCloudBrainOne, }) if err != nil { ctx.ServerError("Cloudbrain", err) diff --git a/routers/repo/dataset.go b/routers/repo/dataset.go index 
962824bd0..9c8557afa 100755 --- a/routers/repo/dataset.go +++ b/routers/repo/dataset.go @@ -49,7 +49,7 @@ func DatasetIndex(ctx *context.Context) { ctx.NotFound("GetDatasetByRepo", err) return } - err = models.GetDatasetAttachments(dataset) + err = models.GetDatasetAttachments(ctx.QueryInt("type"), dataset) if err != nil { ctx.ServerError("GetDatasetAttachments", err) return @@ -80,6 +80,7 @@ func DatasetIndex(ctx *context.Context) { ctx.Data["Attachments"] = attachments ctx.Data["IsOwner"] = true ctx.Data["StoreType"] = setting.Attachment.StoreType + ctx.Data["Type"] = ctx.QueryInt("type") renderAttachmentSettings(ctx) diff --git a/routers/repo/modelarts.go b/routers/repo/modelarts.go new file mode 100755 index 000000000..f7543ece2 --- /dev/null +++ b/routers/repo/modelarts.go @@ -0,0 +1,247 @@ +package repo + +import ( + "code.gitea.io/gitea/modules/modelarts" + "errors" + "github.com/unknwon/com" + "strconv" + "strings" + "time" + + "code.gitea.io/gitea/models" + "code.gitea.io/gitea/modules/auth" + "code.gitea.io/gitea/modules/base" + "code.gitea.io/gitea/modules/context" + "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/setting" +) + +const ( + tplModelArtsIndex base.TplName = "repo/modelarts/index" + tplModelArtsNew base.TplName = "repo/modelarts/new" + tplModelArtsShow base.TplName = "repo/modelarts/show" +) + +// MustEnableDataset check if repository enable internal cb +func MustEnableModelArts(ctx *context.Context) { + if !ctx.Repo.CanRead(models.UnitTypeCloudBrain) { + ctx.NotFound("MustEnableCloudbrain", nil) + return + } +} +func ModelArtsIndex(ctx *context.Context) { + MustEnableModelArts(ctx) + repo := ctx.Repo.Repository + page := ctx.QueryInt("page") + if page <= 0 { + page = 1 + } + + ciTasks, count, err := models.Cloudbrains(&models.CloudbrainsOptions{ + ListOptions: models.ListOptions{ + Page: page, + PageSize: setting.UI.IssuePagingNum, + }, + RepoID: repo.ID, + Type: models.TypeCloudBrainTwo, + }) + if err != nil { + ctx.ServerError("Cloudbrain", err) + return + } + + for i, task := range ciTasks { + if task.Status == string(models.JobRunning) { + ciTasks[i].CanDebug = true + } else { + ciTasks[i].CanDebug = false + } + } + + pager := context.NewPagination(int(count), setting.UI.IssuePagingNum, page, 5) + pager.SetDefaultParams(ctx) + ctx.Data["Page"] = pager + + ctx.Data["PageIsCloudBrain"] = true + ctx.Data["Tasks"] = ciTasks + ctx.HTML(200, tplModelArtsIndex) +} + +func ModelArtsNew(ctx *context.Context) { + ctx.Data["PageIsCloudBrain"] = true + + t := time.Now() + var jobName = cutString(ctx.User.Name, 5) + t.Format("2006010215") + strconv.Itoa(int(t.Unix()))[5:] + ctx.Data["job_name"] = jobName + + attachs, err := models.GetModelArtsUserAttachments(ctx.User.ID) + if err != nil { + ctx.ServerError("GetAllUserAttachments failed:", err) + return + } + + ctx.Data["attachments"] = attachs + ctx.Data["dataset_path"] = modelarts.DataSetMountPath + ctx.Data["env"] = modelarts.NotebookEnv + ctx.Data["notebook_type"] = modelarts.NotebookType + ctx.Data["flavor"] = modelarts.FlavorInfo + ctx.HTML(200, tplModelArtsNew) +} + +func ModelArtsCreate(ctx *context.Context, form auth.CreateModelArtsForm) { + ctx.Data["PageIsCloudBrain"] = true + jobName := form.JobName + uuid := form.Attachment + description := form.Description + //repo := ctx.Repo.Repository + + err := modelarts.GenerateTask(ctx, jobName, uuid, description) + if err != nil { + ctx.RenderWithErr(err.Error(), tplModelArtsNew, &form) + return + } + + ctx.Redirect(setting.AppSubURL + ctx.Repo.RepoLink 
+ "/modelarts") +} + +func ModelArtsShow(ctx *context.Context) { + ctx.Data["PageIsCloudBrain"] = true + + var jobID = ctx.Params(":jobid") + task, err := models.GetCloudbrainByJobID(jobID) + if err != nil { + ctx.Data["error"] = err.Error() + ctx.RenderWithErr(err.Error(), tplModelArtsIndex, nil) + return + } + + result, err := modelarts.GetJob(jobID) + if err != nil { + ctx.Data["error"] = err.Error() + ctx.RenderWithErr(err.Error(), tplModelArtsIndex, nil) + return + } + + if result != nil { + task.Status = result.Status + err = models.UpdateJob(task) + if err != nil { + ctx.Data["error"] = err.Error() + ctx.RenderWithErr(err.Error(), tplModelArtsIndex, nil) + return + } + + createTime, _ := com.StrTo(result.CreationTimestamp).Int64() + result.CreateTime = time.Unix(int64(createTime/1000), 0).Format("2006-01-02 15:04:05") + endTime, _ := com.StrTo(result.LatestUpdateTimestamp).Int64() + result.LatestUpdateTime = time.Unix(int64(endTime/1000), 0).Format("2006-01-02 15:04:05") + result.QueuingInfo.BeginTime = time.Unix(int64(result.QueuingInfo.BeginTimestamp/1000), 0).Format("2006-01-02 15:04:05") + result.QueuingInfo.EndTime = time.Unix(int64(result.QueuingInfo.EndTimestamp/1000), 0).Format("2006-01-02 15:04:05") + } + + ctx.Data["task"] = task + ctx.Data["jobID"] = jobID + ctx.Data["result"] = result + ctx.HTML(200, tplModelArtsShow) +} + +func ModelArtsDebug(ctx *context.Context) { + var jobID = ctx.Params(":jobid") + _, err := models.GetCloudbrainByJobID(jobID) + if err != nil { + ctx.ServerError("GetCloudbrainByJobID failed", err) + return + } + + result, err := modelarts.GetJob(jobID) + if err != nil { + ctx.RenderWithErr(err.Error(), tplModelArtsIndex, nil) + return + } + + res, err := modelarts.GetJobToken(jobID) + if err != nil { + ctx.RenderWithErr(err.Error(), tplModelArtsIndex, nil) + return + } + + + urls := strings.Split(result.Spec.Annotations.Url, "/") + urlPrefix := result.Spec.Annotations.TargetDomain + for i, url := range urls { + if i > 2 { + urlPrefix += "/" + url + } + } + + //urlPrefix := result.Spec.Annotations.TargetDomain + "/modelarts/internal/hub/notebook/user/" + task.JobID + log.Info(urlPrefix) + debugUrl := urlPrefix + "?token=" + res.Token + ctx.Redirect(debugUrl) +} + +func ModelArtsStop(ctx *context.Context) { + var jobID = ctx.Params(":jobid") + log.Info(jobID) + task, err := models.GetCloudbrainByJobID(jobID) + if err != nil { + ctx.ServerError("GetCloudbrainByJobID failed", err) + return + } + + if task.Status != string(models.JobRunning) { + log.Error("the job(%s) is not running", task.JobName) + ctx.ServerError("the job is not running", errors.New("the job is not running")) + return + } + + param := models.NotebookAction{ + Action: models.ActionStop, + } + res, err := modelarts.StopJob(jobID, param) + if err != nil { + log.Error("StopJob(%s) failed:%v", task.JobName, err.Error()) + ctx.ServerError("StopJob failed", err) + return + } + + task.Status = res.CurrentStatus + err = models.UpdateJob(task) + if err != nil { + ctx.ServerError("UpdateJob failed", err) + return + } + + ctx.Redirect(setting.AppSubURL + ctx.Repo.RepoLink + "/modelarts") +} + +func ModelArtsDel(ctx *context.Context) { + var jobID = ctx.Params(":jobid") + task, err := models.GetCloudbrainByJobID(jobID) + if err != nil { + ctx.ServerError("GetCloudbrainByJobID failed", err) + return + } + + if task.Status != string(models.JobStopped) { + log.Error("the job(%s) has not been stopped", task.JobName) + ctx.ServerError("the job has not been stopped", errors.New("the job has not been 
stopped")) + return + } + + _, err = modelarts.DelJob(jobID) + if err != nil { + log.Error("DelJob(%s) failed:%v", task.JobName, err.Error()) + ctx.ServerError("DelJob failed", err) + return + } + + err = models.DeleteJob(task) + if err != nil { + ctx.ServerError("DeleteJob failed", err) + return + } + + ctx.Redirect(setting.AppSubURL + ctx.Repo.RepoLink + "/modelarts") +} + diff --git a/routers/routes/routes.go b/routers/routes/routes.go index 8831e20a5..312707314 100755 --- a/routers/routes/routes.go +++ b/routers/routes/routes.go @@ -529,6 +529,8 @@ func RegisterRoutes(m *macaron.Macaron) { m.Get("/get_multipart_url", repo.GetMultipartUploadUrl) m.Post("/complete_multipart", repo.CompleteMultipart) m.Post("/update_chunk", repo.UpdateMultipart) + m.Post("/upload_part", repo.UploadPart) + m.Get("/get_obs_key", repo.GetObsKey) }, reqSignIn) m.Group("/attachments", func() { @@ -913,6 +915,18 @@ func RegisterRoutes(m *macaron.Macaron) { m.Post("/create", reqRepoCloudBrainWriter, bindIgnErr(auth.CreateCloudBrainForm{}), repo.CloudBrainCreate) }, context.RepoRef()) + m.Group("/modelarts", func() { + m.Get("", reqRepoCloudBrainReader, repo.ModelArtsIndex) + m.Group("/:jobid", func() { + m.Get("", reqRepoCloudBrainReader, repo.ModelArtsShow) + m.Get("/debug", reqRepoCloudBrainReader, repo.ModelArtsDebug) + m.Post("/stop", reqRepoCloudBrainWriter, repo.ModelArtsStop) + m.Post("/del", reqRepoCloudBrainWriter, repo.ModelArtsDel) + }) + m.Get("/create", reqRepoCloudBrainWriter, repo.ModelArtsNew) + m.Post("/create", reqRepoCloudBrainWriter, bindIgnErr(auth.CreateModelArtsForm{}), repo.ModelArtsCreate) + }, context.RepoRef()) + m.Group("/blockchain", func() { m.Get("", repo.BlockChainIndex) }, context.RepoRef()) diff --git a/templates/base/head.tmpl b/templates/base/head.tmpl old mode 100644 new mode 100755 index cad2c976d..ce5d9892d --- a/templates/base/head.tmpl +++ b/templates/base/head.tmpl @@ -175,6 +175,7 @@ {{end}} {{template "custom/header" .}} + {{template "custom/body_outer_pre" .}} diff --git a/templates/explore/dataset_list.tmpl b/templates/explore/dataset_list.tmpl index f7caf4f36..9200274a9 100755 --- a/templates/explore/dataset_list.tmpl +++ b/templates/explore/dataset_list.tmpl @@ -31,9 +31,7 @@
{{svg "octicon-tasklist" 16}} {{$.i18n.Tr (printf "dataset.task.%s" .Task)}} {{svg "octicon-tag" 16}}{{$.i18n.Tr (printf "dataset.category.%s" .Category)}} - {{if ne .DownloadTimes 0}} {{svg "octicon-flame" 16}} {{.DownloadTimes}} - {{end}}
diff --git a/templates/repo/cloudbrain/index.tmpl b/templates/repo/cloudbrain/index.tmpl index 778c70e50..e2197e3b8 100755 --- a/templates/repo/cloudbrain/index.tmpl +++ b/templates/repo/cloudbrain/index.tmpl @@ -198,7 +198,7 @@
-	{{.i18n.Tr "repo.cloudbrain"}}
+	{{.i18n.Tr "repo.cloudbrain1"}}
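The `repo.cloudbrain1`/`repo.cloudbrain2` labels correspond to the two storage back ends that `routers/repo/attachment.go` now switches between. A condensed sketch of that dispatch; the `hasAttachmentObject` helper is illustrative and not part of this changeset, but both branches are lifted verbatim from `AddAttachment`.

```go
package repo

import (
	"errors"

	"code.gitea.io/gitea/models"
	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/storage"
)

// hasAttachmentObject dispatches an existence check to the back end matching
// the attachment type: Minio for cloudbrain one, Huawei OBS for cloudbrain two.
func hasAttachmentObject(typeCloudBrain int, uuid string) (bool, error) {
	switch typeCloudBrain {
	case models.TypeCloudBrainOne:
		return storage.Attachments.HasObject(models.AttachmentRelativePath(uuid))
	case models.TypeCloudBrainTwo:
		return storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(uuid) + "/" + uuid)
	default:
		return false, errors.New("type error")
	}
}
```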
diff --git a/templates/repo/datasets/dataset.tmpl b/templates/repo/datasets/dataset.tmpl old mode 100644 new mode 100755 index 9e533acd3..9a42c7a56 --- a/templates/repo/datasets/dataset.tmpl +++ b/templates/repo/datasets/dataset.tmpl @@ -2,8 +2,20 @@
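The expression `strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/")` recurs in every OBS helper in `modules/storage/obs.go`. A small sketch of what it computes; the deduplicating `obsKey` helper is hypothetical and does not exist in the changeset.

```go
package storage

import (
	"path"
	"strings"

	"code.gitea.io/gitea/modules/setting"
)

// obsKey spells out the object-key layout used by the OBS helpers in this
// changeset: BASE_PATH, then the first two characters of the UUID as nested
// directories, then the UUID twice, e.g. attachment/0/1/<uuid>/<uuid>.
func obsKey(uuid string) string {
	return strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/")
}
```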
diff --git a/templates/repo/datasets/dataset_list.tmpl b/templates/repo/datasets/dataset_list.tmpl index 53a8c8273..a86b7c6ca 100755 --- a/templates/repo/datasets/dataset_list.tmpl +++ b/templates/repo/datasets/dataset_list.tmpl @@ -3,7 +3,7 @@
@@ -14,7 +14,7 @@ {{svg "octicon-flame" 16}} {{(.DownloadCount | PrettyNumber)}}
-
+
{{svg "octicon-file" 16}}
diff --git a/templates/repo/datasets/index.tmpl b/templates/repo/datasets/index.tmpl index 2da6ca8bf..413bce917 100755 --- a/templates/repo/datasets/index.tmpl +++ b/templates/repo/datasets/index.tmpl @@ -1,6 +1,17 @@ {{template "base/head" .}}
- {{template "repo/header" .}} + {{template "repo/header" .}} +
+{{template "base/head" .}}
+	{{template "repo/header" .}}
+	{{.i18n.Tr "repo.cloudbrain2"}}
+	{{if .Permission.CanWrite $.UnitTypeCloudBrain}} {{.i18n.Tr "repo.cloudbrain.new"}} {{end}}
+	{{range .Tasks}}
+		{{.Status}}
+		{{svg "octicon-flame" 16}} {{TimeSinceUnix .CreatedUnix $.Lang}}
+		查看
+		{{$.CsrfTokenHtml}} 删除
+		{{$.CsrfTokenHtml}} 停止
+	{{end}} {{template "base/paginate" .}}
+{{template "base/footer" .}} + + \ No newline at end of file diff --git a/templates/repo/modelarts/new.tmpl b/templates/repo/modelarts/new.tmpl new file mode 100755 index 000000000..bb52e3585 --- /dev/null +++ b/templates/repo/modelarts/new.tmpl @@ -0,0 +1,184 @@ +{{template "base/head" .}} + + +
diff --git a/templates/repo/modelarts/new.tmpl b/templates/repo/modelarts/new.tmpl
new file mode 100755
index 000000000..bb52e3585
--- /dev/null
+++ b/templates/repo/modelarts/new.tmpl
@@ -0,0 +1,184 @@
+{{template "base/head" .}}
+
+
+
+
+
+
+	{{template "repo/header" .}}
+
+	{{template "base/alert" .}}
+
+	{{.CsrfTokenHtml}}
+	{{.i18n.Tr "repo.cloudbrain.new"}}
+
+
+
+
+
+
+
+
+
+
+			{{.i18n.Tr "repo.cloudbrain.cancel"}}
+
+
+
+{{template "base/footer" .}} + + \ No newline at end of file diff --git a/templates/repo/modelarts/show.tmpl b/templates/repo/modelarts/show.tmpl new file mode 100755 index 000000000..3f914b56d --- /dev/null +++ b/templates/repo/modelarts/show.tmpl @@ -0,0 +1,122 @@ +{{template "base/head" .}} +
+{{template "repo/header" .}} +
+
+ {{template "base/alert" .}} + +

+ 返回 +

+
+
+ {{with .task}} +

任务名称: {{.JobName}}

+ {{end}} +
+
+

任务结果:

+	{{with .result}}
+		Status: {{.Status}}
+		Start time: {{.CreateTime}}
+		Last updated: {{.LatestUpdateTime}}
+	{{end}}
+
+	{{with .result}}
+		Configuration
+		Dev environment type: {{.Profile.DeType}}
+		Hardware type: {{.Profile.FlavorType}}
+
+		Flavor details
+		Flavor: {{.Flavor}}
+		Flavor name: {{.FlavorDetails.Name}}
+		Flavor sale status: {{.FlavorDetails.Status}}
+		Queue length: {{.FlavorDetails.QueuingNum}}
+		Time left in queue (seconds): {{.FlavorDetails.QueueLeftTime}}
+		Auto-stop duration (seconds): {{.FlavorDetails.Duration}}
+
+		Queuing info
+		Instance status: {{.QueuingInfo.Status}}
+		Queue start time: {{.QueuingInfo.BeginTime}}
+		Time left in queue (seconds): {{.QueuingInfo.RemainTime}}
+		Estimated queue end time: {{.QueuingInfo.EndTime}}
+		Position in queue: {{.QueuingInfo.Rank}}
+	{{end}}
+
+
+
+{{template "base/footer" .}}
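show.tmpl dereferences a .result object with Profile, Flavor, FlavorDetails and QueuingInfo sub-structures, i.e. the ModelArts notebook-detail payload. Reverse-engineering only the accessors the template uses gives roughly this shape (the field names come straight from the template; JSON tags and field types are assumptions):

package modelarts

// NotebookResultSketch lists exactly the fields show.tmpl reads from .result.
type NotebookResultSketch struct {
	Status           string `json:"status"`
	CreateTime       string `json:"create_time"`
	LatestUpdateTime string `json:"latest_update_time"`
	Flavor           string `json:"flavor"`
	Profile          struct {
		DeType     string `json:"de_type"`
		FlavorType string `json:"flavor_type"`
	} `json:"profile"`
	FlavorDetails struct {
		Name          string `json:"name"`
		Status        string `json:"status"`
		QueuingNum    int    `json:"queuing_num"`
		QueueLeftTime int    `json:"queue_left_time"` // seconds
		Duration      int    `json:"duration"`        // auto-stop window, seconds
	} `json:"flavor_details"`
	QueuingInfo struct {
		Status     string `json:"status"`
		BeginTime  string `json:"begin_time"`
		RemainTime int    `json:"remain_time"` // seconds
		EndTime    string `json:"end_time"`
		Rank       int    `json:"rank"`
	} `json:"queuing_info"`
}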
+{{template "base/footer" .}} diff --git a/web_src/js/components/MinioUploader.vue b/web_src/js/components/MinioUploader.vue index 71b44a39c..c453daf16 100755 --- a/web_src/js/components/MinioUploader.vue +++ b/web_src/js/components/MinioUploader.vue @@ -245,7 +245,8 @@ export default { file_name: file.name, size: file.size, dataset_id: file.datasetId, - _csrf: csrf + _csrf: csrf, + type:0 }) ); } @@ -450,4 +451,4 @@ export default { border-bottom: 1px solid #dadce0; min-height: 0; } - + \ No newline at end of file diff --git a/web_src/js/components/ObsUploader.vue b/web_src/js/components/ObsUploader.vue new file mode 100755 index 000000000..e61981c7e --- /dev/null +++ b/web_src/js/components/ObsUploader.vue @@ -0,0 +1,298 @@ + + + + + \ No newline at end of file diff --git a/web_src/js/index.js b/web_src/js/index.js index 140a855a0..ddb69f701 100755 --- a/web_src/js/index.js +++ b/web_src/js/index.js @@ -29,6 +29,7 @@ import { } from './features/notification.js'; import {createCodeEditor} from './features/codeeditor.js'; import MinioUploader from './components/MinioUploader.vue'; +import ObsUploader from './components/ObsUploader.vue' const {AppSubUrl, StaticUrlPrefix, csrf} = window.config; @@ -2955,6 +2956,7 @@ $(document).ready(async () => { initCodeView(); initVueApp(); initVueUploader(); + initObsUploader(); initTeamSettings(); initCtrlEnterSubmit(); initNavbarContentToggle(); @@ -3641,6 +3643,21 @@ function initVueUploader() { }); } +// 新增 +function initObsUploader() { + const el = document.getElementById('obsUploader'); + if (!el) { + return; + } + + new Vue({ + el: '#obsUploader', + components: {ObsUploader}, + template: '' + }); +} + + window.timeAddManual = function () { $('.mini.modal') .modal({ diff --git a/webpack.config.js b/webpack.config.js old mode 100644 new mode 100755 index d6a632ad1..a08810ebc --- a/webpack.config.js +++ b/webpack.config.js @@ -44,6 +44,9 @@ module.exports = { filename: 'js/[name].js', chunkFilename: 'js/[name].js', }, + node:{ + fs: 'empty' + }, optimization: { minimize: isProduction, minimizer: [ @@ -237,7 +240,7 @@ module.exports = { }), new MonacoWebpackPlugin({ filename: 'js/monaco-[name].worker.js', - }), + }) ], performance: { hints: false,