From 7a95e3edb0a4aff681b009e32612aa9f7dc1a441 Mon Sep 17 00:00:00 2001 From: yuyuanshifu <747342561@qq.com> Date: Thu, 28 Jan 2021 20:29:32 +0800 Subject: [PATCH 01/36] add obs sdk --- modules/obs/auth.go | 466 +++++++++++++++++ modules/obs/client.go | 1307 ++++++++++++++++++++++++++++++++++++++++++++++ modules/obs/conf.go | 471 +++++++++++++++++ modules/obs/const.go | 932 +++++++++++++++++++++++++++++++++ modules/obs/convert.go | 880 +++++++++++++++++++++++++++++++ modules/obs/error.go | 35 ++ modules/obs/extension.go | 37 ++ modules/obs/http.go | 566 ++++++++++++++++++++ modules/obs/log.go | 317 +++++++++++ modules/obs/model.go | 1236 +++++++++++++++++++++++++++++++++++++++++++ modules/obs/pool.go | 543 +++++++++++++++++++ modules/obs/temporary.go | 790 ++++++++++++++++++++++++++++ modules/obs/trait.go | 909 ++++++++++++++++++++++++++++++++ modules/obs/transfer.go | 873 +++++++++++++++++++++++++++++++ modules/obs/util.go | 536 +++++++++++++++++++ 15 files changed, 9898 insertions(+) create mode 100755 modules/obs/auth.go create mode 100755 modules/obs/client.go create mode 100755 modules/obs/conf.go create mode 100755 modules/obs/const.go create mode 100755 modules/obs/convert.go create mode 100755 modules/obs/error.go create mode 100755 modules/obs/extension.go create mode 100755 modules/obs/http.go create mode 100755 modules/obs/log.go create mode 100755 modules/obs/model.go create mode 100755 modules/obs/pool.go create mode 100755 modules/obs/temporary.go create mode 100755 modules/obs/trait.go create mode 100755 modules/obs/transfer.go create mode 100755 modules/obs/util.go diff --git a/modules/obs/auth.go b/modules/obs/auth.go new file mode 100755 index 000000000..607a5ec39 --- /dev/null +++ b/modules/obs/auth.go @@ -0,0 +1,466 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +package obs + +import ( + "fmt" + "net/url" + "sort" + "strings" + "time" +) + +func (obsClient ObsClient) doAuthTemporary(method, bucketName, objectKey string, params map[string]string, + headers map[string][]string, expires int64) (requestURL string, err error) { + isAkSkEmpty := obsClient.conf.securityProvider == nil || obsClient.conf.securityProvider.ak == "" || obsClient.conf.securityProvider.sk == "" + if isAkSkEmpty == false && obsClient.conf.securityProvider.securityToken != "" { + if obsClient.conf.signature == SignatureObs { + params[HEADER_STS_TOKEN_OBS] = obsClient.conf.securityProvider.securityToken + } else { + params[HEADER_STS_TOKEN_AMZ] = obsClient.conf.securityProvider.securityToken + } + } + requestURL, canonicalizedURL := obsClient.conf.formatUrls(bucketName, objectKey, params, true) + parsedRequestURL, err := url.Parse(requestURL) + if err != nil { + return "", err + } + encodeHeaders(headers) + hostName := parsedRequestURL.Host + + isV4 := obsClient.conf.signature == SignatureV4 + prepareHostAndDate(headers, hostName, isV4) + + if isAkSkEmpty { + doLog(LEVEL_WARN, "No ak/sk provided, skip to construct authorization") + } else { + if isV4 { + date, parseDateErr := time.Parse(RFC1123_FORMAT, headers[HEADER_DATE_CAMEL][0]) + if parseDateErr != nil { + doLog(LEVEL_WARN, "Failed to parse date with reason: %v", parseDateErr) + return "", parseDateErr + } + delete(headers, HEADER_DATE_CAMEL) + shortDate := date.Format(SHORT_DATE_FORMAT) + longDate := date.Format(LONG_DATE_FORMAT) + if len(headers[HEADER_HOST_CAMEL]) != 0 { + index := strings.LastIndex(headers[HEADER_HOST_CAMEL][0], ":") + if index != -1 { + port := headers[HEADER_HOST_CAMEL][0][index+1:] + if port == "80" || port == "443" { + headers[HEADER_HOST_CAMEL] = []string{headers[HEADER_HOST_CAMEL][0][:index]} + } + } + + } + + signedHeaders, _headers := getSignedHeaders(headers) + + credential, scope := getCredential(obsClient.conf.securityProvider.ak, obsClient.conf.region, shortDate) + params[PARAM_ALGORITHM_AMZ_CAMEL] = V4_HASH_PREFIX + params[PARAM_CREDENTIAL_AMZ_CAMEL] = credential + params[PARAM_DATE_AMZ_CAMEL] = longDate + params[PARAM_EXPIRES_AMZ_CAMEL] = Int64ToString(expires) + params[PARAM_SIGNEDHEADERS_AMZ_CAMEL] = strings.Join(signedHeaders, ";") + + requestURL, canonicalizedURL = obsClient.conf.formatUrls(bucketName, objectKey, params, true) + parsedRequestURL, _err := url.Parse(requestURL) + if _err != nil { + return "", _err + } + + stringToSign := getV4StringToSign(method, canonicalizedURL, parsedRequestURL.RawQuery, scope, longDate, UNSIGNED_PAYLOAD, signedHeaders, _headers) + signature := getSignature(stringToSign, obsClient.conf.securityProvider.sk, obsClient.conf.region, shortDate) + + requestURL += fmt.Sprintf("&%s=%s", PARAM_SIGNATURE_AMZ_CAMEL, UrlEncode(signature, false)) + + } else { + originDate := headers[HEADER_DATE_CAMEL][0] + date, parseDateErr := time.Parse(RFC1123_FORMAT, originDate) + if parseDateErr != nil { + doLog(LEVEL_WARN, "Failed to parse date with reason: %v", parseDateErr) + return "", parseDateErr + } + expires += date.Unix() + headers[HEADER_DATE_CAMEL] = []string{Int64ToString(expires)} + + stringToSign := getV2StringToSign(method, canonicalizedURL, headers, obsClient.conf.signature == SignatureObs) + signature := UrlEncode(Base64Encode(HmacSha1([]byte(obsClient.conf.securityProvider.sk), []byte(stringToSign))), false) + if strings.Index(requestURL, "?") < 0 { + requestURL += "?" 
+ } else { + requestURL += "&" + } + delete(headers, HEADER_DATE_CAMEL) + + if obsClient.conf.signature != SignatureObs { + requestURL += "AWS" + } + requestURL += fmt.Sprintf("AccessKeyId=%s&Expires=%d&Signature=%s", UrlEncode(obsClient.conf.securityProvider.ak, false), expires, signature) + } + } + + return +} + +func (obsClient ObsClient) doAuth(method, bucketName, objectKey string, params map[string]string, + headers map[string][]string, hostName string) (requestURL string, err error) { + isAkSkEmpty := obsClient.conf.securityProvider == nil || obsClient.conf.securityProvider.ak == "" || obsClient.conf.securityProvider.sk == "" + if isAkSkEmpty == false && obsClient.conf.securityProvider.securityToken != "" { + if obsClient.conf.signature == SignatureObs { + headers[HEADER_STS_TOKEN_OBS] = []string{obsClient.conf.securityProvider.securityToken} + } else { + headers[HEADER_STS_TOKEN_AMZ] = []string{obsClient.conf.securityProvider.securityToken} + } + } + isObs := obsClient.conf.signature == SignatureObs + requestURL, canonicalizedURL := obsClient.conf.formatUrls(bucketName, objectKey, params, true) + parsedRequestURL, err := url.Parse(requestURL) + if err != nil { + return "", err + } + encodeHeaders(headers) + + if hostName == "" { + hostName = parsedRequestURL.Host + } + + isV4 := obsClient.conf.signature == SignatureV4 + prepareHostAndDate(headers, hostName, isV4) + + if isAkSkEmpty { + doLog(LEVEL_WARN, "No ak/sk provided, skip to construct authorization") + } else { + ak := obsClient.conf.securityProvider.ak + sk := obsClient.conf.securityProvider.sk + var authorization string + if isV4 { + headers[HEADER_CONTENT_SHA256_AMZ] = []string{UNSIGNED_PAYLOAD} + ret := v4Auth(ak, sk, obsClient.conf.region, method, canonicalizedURL, parsedRequestURL.RawQuery, headers) + authorization = fmt.Sprintf("%s Credential=%s,SignedHeaders=%s,Signature=%s", V4_HASH_PREFIX, ret["Credential"], ret["SignedHeaders"], ret["Signature"]) + } else { + ret := v2Auth(ak, sk, method, canonicalizedURL, headers, isObs) + hashPrefix := V2_HASH_PREFIX + if isObs { + hashPrefix = OBS_HASH_PREFIX + } + authorization = fmt.Sprintf("%s %s:%s", hashPrefix, ak, ret["Signature"]) + } + headers[HEADER_AUTH_CAMEL] = []string{authorization} + } + return +} + +func prepareHostAndDate(headers map[string][]string, hostName string, isV4 bool) { + headers[HEADER_HOST_CAMEL] = []string{hostName} + if date, ok := headers[HEADER_DATE_AMZ]; ok { + flag := false + if len(date) == 1 { + if isV4 { + if t, err := time.Parse(LONG_DATE_FORMAT, date[0]); err == nil { + headers[HEADER_DATE_CAMEL] = []string{FormatUtcToRfc1123(t)} + flag = true + } + } else { + if strings.HasSuffix(date[0], "GMT") { + headers[HEADER_DATE_CAMEL] = []string{date[0]} + flag = true + } + } + } + if !flag { + delete(headers, HEADER_DATE_AMZ) + } + } + if _, ok := headers[HEADER_DATE_CAMEL]; !ok { + headers[HEADER_DATE_CAMEL] = []string{FormatUtcToRfc1123(time.Now().UTC())} + } +} + +func encodeHeaders(headers map[string][]string) { + for key, values := range headers { + for index, value := range values { + values[index] = UrlEncode(value, true) + } + headers[key] = values + } +} + +func attachHeaders(headers map[string][]string, isObs bool) string { + length := len(headers) + _headers := make(map[string][]string, length) + keys := make([]string, 0, length) + + for key, value := range headers { + _key := strings.ToLower(strings.TrimSpace(key)) + if _key != "" { + prefixheader := HEADER_PREFIX + if isObs { + prefixheader = HEADER_PREFIX_OBS + } + if _key == 
"content-md5" || _key == "content-type" || _key == "date" || strings.HasPrefix(_key, prefixheader) { + keys = append(keys, _key) + _headers[_key] = value + } + } else { + delete(headers, key) + } + } + + for _, interestedHeader := range interestedHeaders { + if _, ok := _headers[interestedHeader]; !ok { + _headers[interestedHeader] = []string{""} + keys = append(keys, interestedHeader) + } + } + dateCamelHeader := PARAM_DATE_AMZ_CAMEL + dataHeader := HEADER_DATE_AMZ + if isObs { + dateCamelHeader = PARAM_DATE_OBS_CAMEL + dataHeader = HEADER_DATE_OBS + } + if _, ok := _headers[HEADER_DATE_CAMEL]; ok { + if _, ok := _headers[dataHeader]; ok { + _headers[HEADER_DATE_CAMEL] = []string{""} + } else if _, ok := headers[dateCamelHeader]; ok { + _headers[HEADER_DATE_CAMEL] = []string{""} + } + } else if _, ok := _headers[strings.ToLower(HEADER_DATE_CAMEL)]; ok { + if _, ok := _headers[dataHeader]; ok { + _headers[HEADER_DATE_CAMEL] = []string{""} + } else if _, ok := headers[dateCamelHeader]; ok { + _headers[HEADER_DATE_CAMEL] = []string{""} + } + } + + sort.Strings(keys) + + stringToSign := make([]string, 0, len(keys)) + for _, key := range keys { + var value string + prefixHeader := HEADER_PREFIX + prefixMetaHeader := HEADER_PREFIX_META + if isObs { + prefixHeader = HEADER_PREFIX_OBS + prefixMetaHeader = HEADER_PREFIX_META_OBS + } + if strings.HasPrefix(key, prefixHeader) { + if strings.HasPrefix(key, prefixMetaHeader) { + for index, v := range _headers[key] { + value += strings.TrimSpace(v) + if index != len(_headers[key])-1 { + value += "," + } + } + } else { + value = strings.Join(_headers[key], ",") + } + value = fmt.Sprintf("%s:%s", key, value) + } else { + value = strings.Join(_headers[key], ",") + } + stringToSign = append(stringToSign, value) + } + return strings.Join(stringToSign, "\n") +} + +func getV2StringToSign(method, canonicalizedURL string, headers map[string][]string, isObs bool) string { + stringToSign := strings.Join([]string{method, "\n", attachHeaders(headers, isObs), "\n", canonicalizedURL}, "") + + var isSecurityToken bool + var securityToken []string + if isObs { + securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_OBS] + } else { + securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_AMZ] + } + var query []string + if !isSecurityToken { + parmas := strings.Split(canonicalizedURL, "?") + if len(parmas) > 1 { + query = strings.Split(parmas[1], "&") + for _, value := range query { + if strings.HasPrefix(value, HEADER_STS_TOKEN_AMZ+"=") || strings.HasPrefix(value, HEADER_STS_TOKEN_OBS+"=") { + if value[len(HEADER_STS_TOKEN_AMZ)+1:] != "" { + securityToken = []string{value[len(HEADER_STS_TOKEN_AMZ)+1:]} + isSecurityToken = true + } + } + } + } + } + logStringToSign := stringToSign + if isSecurityToken && len(securityToken) > 0 { + logStringToSign = strings.Replace(logStringToSign, securityToken[0], "******", -1) + } + doLog(LEVEL_DEBUG, "The v2 auth stringToSign:\n%s", logStringToSign) + return stringToSign +} + +func v2Auth(ak, sk, method, canonicalizedURL string, headers map[string][]string, isObs bool) map[string]string { + stringToSign := getV2StringToSign(method, canonicalizedURL, headers, isObs) + return map[string]string{"Signature": Base64Encode(HmacSha1([]byte(sk), []byte(stringToSign)))} +} + +func getScope(region, shortDate string) string { + return fmt.Sprintf("%s/%s/%s/%s", shortDate, region, V4_SERVICE_NAME, V4_SERVICE_SUFFIX) +} + +func getCredential(ak, region, shortDate string) (string, string) { + scope := getScope(region, shortDate) + return 
fmt.Sprintf("%s/%s", ak, scope), scope +} + +func getV4StringToSign(method, canonicalizedURL, queryURL, scope, longDate, payload string, signedHeaders []string, headers map[string][]string) string { + canonicalRequest := make([]string, 0, 10+len(signedHeaders)*4) + canonicalRequest = append(canonicalRequest, method) + canonicalRequest = append(canonicalRequest, "\n") + canonicalRequest = append(canonicalRequest, canonicalizedURL) + canonicalRequest = append(canonicalRequest, "\n") + canonicalRequest = append(canonicalRequest, queryURL) + canonicalRequest = append(canonicalRequest, "\n") + + for _, signedHeader := range signedHeaders { + values, _ := headers[signedHeader] + for _, value := range values { + canonicalRequest = append(canonicalRequest, signedHeader) + canonicalRequest = append(canonicalRequest, ":") + canonicalRequest = append(canonicalRequest, value) + canonicalRequest = append(canonicalRequest, "\n") + } + } + canonicalRequest = append(canonicalRequest, "\n") + canonicalRequest = append(canonicalRequest, strings.Join(signedHeaders, ";")) + canonicalRequest = append(canonicalRequest, "\n") + canonicalRequest = append(canonicalRequest, payload) + + _canonicalRequest := strings.Join(canonicalRequest, "") + + var isSecurityToken bool + var securityToken []string + if securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_OBS]; !isSecurityToken { + securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_AMZ] + } + var query []string + if !isSecurityToken { + query = strings.Split(queryURL, "&") + for _, value := range query { + if strings.HasPrefix(value, HEADER_STS_TOKEN_AMZ+"=") || strings.HasPrefix(value, HEADER_STS_TOKEN_OBS+"=") { + if value[len(HEADER_STS_TOKEN_AMZ)+1:] != "" { + securityToken = []string{value[len(HEADER_STS_TOKEN_AMZ)+1:]} + isSecurityToken = true + } + } + } + } + logCanonicalRequest := _canonicalRequest + if isSecurityToken && len(securityToken) > 0 { + logCanonicalRequest = strings.Replace(logCanonicalRequest, securityToken[0], "******", -1) + } + doLog(LEVEL_DEBUG, "The v4 auth canonicalRequest:\n%s", logCanonicalRequest) + + stringToSign := make([]string, 0, 7) + stringToSign = append(stringToSign, V4_HASH_PREFIX) + stringToSign = append(stringToSign, "\n") + stringToSign = append(stringToSign, longDate) + stringToSign = append(stringToSign, "\n") + stringToSign = append(stringToSign, scope) + stringToSign = append(stringToSign, "\n") + stringToSign = append(stringToSign, HexSha256([]byte(_canonicalRequest))) + + _stringToSign := strings.Join(stringToSign, "") + + doLog(LEVEL_DEBUG, "The v4 auth stringToSign:\n%s", _stringToSign) + return _stringToSign +} + +func getSignedHeaders(headers map[string][]string) ([]string, map[string][]string) { + length := len(headers) + _headers := make(map[string][]string, length) + signedHeaders := make([]string, 0, length) + for key, value := range headers { + _key := strings.ToLower(strings.TrimSpace(key)) + if _key != "" { + signedHeaders = append(signedHeaders, _key) + _headers[_key] = value + } else { + delete(headers, key) + } + } + sort.Strings(signedHeaders) + return signedHeaders, _headers +} + +func getSignature(stringToSign, sk, region, shortDate string) string { + key := HmacSha256([]byte(V4_HASH_PRE+sk), []byte(shortDate)) + key = HmacSha256(key, []byte(region)) + key = HmacSha256(key, []byte(V4_SERVICE_NAME)) + key = HmacSha256(key, []byte(V4_SERVICE_SUFFIX)) + return Hex(HmacSha256(key, []byte(stringToSign))) +} + +// V4Auth is a wrapper for v4Auth +func V4Auth(ak, sk, region, method, 
canonicalizedURL, queryURL string, headers map[string][]string) map[string]string { + return v4Auth(ak, sk, region, method, canonicalizedURL, queryURL, headers) +} + +func v4Auth(ak, sk, region, method, canonicalizedURL, queryURL string, headers map[string][]string) map[string]string { + var t time.Time + if val, ok := headers[HEADER_DATE_AMZ]; ok { + var err error + t, err = time.Parse(LONG_DATE_FORMAT, val[0]) + if err != nil { + t = time.Now().UTC() + } + } else if val, ok := headers[PARAM_DATE_AMZ_CAMEL]; ok { + var err error + t, err = time.Parse(LONG_DATE_FORMAT, val[0]) + if err != nil { + t = time.Now().UTC() + } + } else if val, ok := headers[HEADER_DATE_CAMEL]; ok { + var err error + t, err = time.Parse(RFC1123_FORMAT, val[0]) + if err != nil { + t = time.Now().UTC() + } + } else if val, ok := headers[strings.ToLower(HEADER_DATE_CAMEL)]; ok { + var err error + t, err = time.Parse(RFC1123_FORMAT, val[0]) + if err != nil { + t = time.Now().UTC() + } + } else { + t = time.Now().UTC() + } + shortDate := t.Format(SHORT_DATE_FORMAT) + longDate := t.Format(LONG_DATE_FORMAT) + + signedHeaders, _headers := getSignedHeaders(headers) + + credential, scope := getCredential(ak, region, shortDate) + + payload := UNSIGNED_PAYLOAD + if val, ok := headers[HEADER_CONTENT_SHA256_AMZ]; ok { + payload = val[0] + } + stringToSign := getV4StringToSign(method, canonicalizedURL, queryURL, scope, longDate, payload, signedHeaders, _headers) + + signature := getSignature(stringToSign, sk, region, shortDate) + + ret := make(map[string]string, 3) + ret["Credential"] = credential + ret["SignedHeaders"] = strings.Join(signedHeaders, ";") + ret["Signature"] = signature + return ret +} diff --git a/modules/obs/client.go b/modules/obs/client.go new file mode 100755 index 000000000..731f9f465 --- /dev/null +++ b/modules/obs/client.go @@ -0,0 +1,1307 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +//nolint:golint, unused +package obs + +import ( + "errors" + "fmt" + "io" + "net/http" + "os" + "sort" + "strings" +) + +// ObsClient defines OBS client. +type ObsClient struct { + conf *config + httpClient *http.Client +} + +// New creates a new ObsClient instance. 
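+//
+// A minimal usage sketch (the AK/SK and endpoint below are placeholders, not
+// working values):
+//
+//     client, err := obs.New("your-ak", "your-sk", "https://your-endpoint")
+//     if err != nil {
+//         // handle the error
+//     }
+//     defer client.Close()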
+func New(ak, sk, endpoint string, configurers ...configurer) (*ObsClient, error) { + conf := &config{securityProvider: &securityProvider{ak: ak, sk: sk}, endpoint: endpoint} + conf.maxRetryCount = -1 + conf.maxRedirectCount = -1 + for _, configurer := range configurers { + configurer(conf) + } + + if err := conf.initConfigWithDefault(); err != nil { + return nil, err + } + err := conf.getTransport() + if err != nil { + return nil, err + } + + if isWarnLogEnabled() { + info := make([]string, 3) + info[0] = fmt.Sprintf("[OBS SDK Version=%s", obsSdkVersion) + info[1] = fmt.Sprintf("Endpoint=%s", conf.endpoint) + accessMode := "Virtual Hosting" + if conf.pathStyle { + accessMode = "Path" + } + info[2] = fmt.Sprintf("Access Mode=%s]", accessMode) + doLog(LEVEL_WARN, strings.Join(info, "];[")) + } + doLog(LEVEL_DEBUG, "Create obsclient with config:\n%s\n", conf) + obsClient := &ObsClient{conf: conf, httpClient: &http.Client{Transport: conf.transport, CheckRedirect: checkRedirectFunc}} + return obsClient, nil +} + +// Refresh refreshes ak, sk and securityToken for obsClient. +func (obsClient ObsClient) Refresh(ak, sk, securityToken string) { + sp := &securityProvider{ak: strings.TrimSpace(ak), sk: strings.TrimSpace(sk), securityToken: strings.TrimSpace(securityToken)} + obsClient.conf.securityProvider = sp +} + +// Close closes ObsClient. +func (obsClient ObsClient) Close() { + obsClient.httpClient = nil + obsClient.conf.transport.CloseIdleConnections() + obsClient.conf = nil +} + +// ListBuckets lists buckets. +// +// You can use this API to obtain the bucket list. In the list, bucket names are displayed in lexicographical order. +func (obsClient ObsClient) ListBuckets(input *ListBucketsInput, extensions ...extensionOptions) (output *ListBucketsOutput, err error) { + if input == nil { + input = &ListBucketsInput{} + } + output = &ListBucketsOutput{} + err = obsClient.doActionWithoutBucket("ListBuckets", HTTP_GET, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// CreateBucket creates a bucket. +// +// You can use this API to create a bucket and name it as you specify. The created bucket name must be unique in OBS. +func (obsClient ObsClient) CreateBucket(input *CreateBucketInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("CreateBucketInput is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucket("CreateBucket", HTTP_PUT, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// DeleteBucket deletes a bucket. +// +// You can use this API to delete a bucket. The bucket to be deleted must be empty +// (containing no objects, noncurrent object versions, or part fragments). +func (obsClient ObsClient) DeleteBucket(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doActionWithBucket("DeleteBucket", HTTP_DELETE, bucketName, defaultSerializable, output, extensions) + if err != nil { + output = nil + } + return +} + +// SetBucketStoragePolicy sets bucket storage class. +// +// You can use this API to set storage class for bucket. 
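+//
+// A minimal sketch, assuming the StorageClassWarm constant from const.go; the
+// bucket name is a placeholder and client is an *ObsClient returned by New:
+//
+//     input := &obs.SetBucketStoragePolicyInput{}
+//     input.Bucket = "example-bucket"
+//     input.StorageClass = obs.StorageClassWarm
+//     output, err := client.SetBucketStoragePolicy(input)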
+func (obsClient ObsClient) SetBucketStoragePolicy(input *SetBucketStoragePolicyInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("SetBucketStoragePolicyInput is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucket("SetBucketStoragePolicy", HTTP_PUT, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } + return +} +func (obsClient ObsClient) getBucketStoragePolicyS3(bucketName string, extensions []extensionOptions) (output *GetBucketStoragePolicyOutput, err error) { + output = &GetBucketStoragePolicyOutput{} + var outputS3 *getBucketStoragePolicyOutputS3 + outputS3 = &getBucketStoragePolicyOutputS3{} + err = obsClient.doActionWithBucket("GetBucketStoragePolicy", HTTP_GET, bucketName, newSubResourceSerial(SubResourceStoragePolicy), outputS3, extensions) + if err != nil { + output = nil + return + } + output.BaseModel = outputS3.BaseModel + output.StorageClass = fmt.Sprintf("%s", outputS3.StorageClass) + return +} + +func (obsClient ObsClient) getBucketStoragePolicyObs(bucketName string, extensions []extensionOptions) (output *GetBucketStoragePolicyOutput, err error) { + output = &GetBucketStoragePolicyOutput{} + var outputObs *getBucketStoragePolicyOutputObs + outputObs = &getBucketStoragePolicyOutputObs{} + err = obsClient.doActionWithBucket("GetBucketStoragePolicy", HTTP_GET, bucketName, newSubResourceSerial(SubResourceStorageClass), outputObs, extensions) + if err != nil { + output = nil + return + } + output.BaseModel = outputObs.BaseModel + output.StorageClass = outputObs.StorageClass + return +} + +// GetBucketStoragePolicy gets bucket storage class. +// +// You can use this API to obtain the storage class of a bucket. +func (obsClient ObsClient) GetBucketStoragePolicy(bucketName string, extensions ...extensionOptions) (output *GetBucketStoragePolicyOutput, err error) { + if obsClient.conf.signature == SignatureObs { + return obsClient.getBucketStoragePolicyObs(bucketName, extensions) + } + return obsClient.getBucketStoragePolicyS3(bucketName, extensions) +} + +// ListObjects lists objects in a bucket. +// +// You can use this API to list objects in a bucket. By default, a maximum of 1000 objects are listed. +func (obsClient ObsClient) ListObjects(input *ListObjectsInput, extensions ...extensionOptions) (output *ListObjectsOutput, err error) { + if input == nil { + return nil, errors.New("ListObjectsInput is nil") + } + output = &ListObjectsOutput{} + err = obsClient.doActionWithBucket("ListObjects", HTTP_GET, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } else { + if location, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok { + output.Location = location[0] + } + } + return +} + +// ListVersions lists versioning objects in a bucket. +// +// You can use this API to list versioning objects in a bucket. By default, a maximum of 1000 versioning objects are listed. +func (obsClient ObsClient) ListVersions(input *ListVersionsInput, extensions ...extensionOptions) (output *ListVersionsOutput, err error) { + if input == nil { + return nil, errors.New("ListVersionsInput is nil") + } + output = &ListVersionsOutput{} + err = obsClient.doActionWithBucket("ListVersions", HTTP_GET, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } else { + if location, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok { + output.Location = location[0] + } + } + return +} + +// ListMultipartUploads lists the multipart uploads. 
+// +// You can use this API to list the multipart uploads that are initialized but not combined or aborted in a specified bucket. +func (obsClient ObsClient) ListMultipartUploads(input *ListMultipartUploadsInput, extensions ...extensionOptions) (output *ListMultipartUploadsOutput, err error) { + if input == nil { + return nil, errors.New("ListMultipartUploadsInput is nil") + } + output = &ListMultipartUploadsOutput{} + err = obsClient.doActionWithBucket("ListMultipartUploads", HTTP_GET, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// SetBucketQuota sets the bucket quota. +// +// You can use this API to set the bucket quota. A bucket quota must be expressed in bytes and the maximum value is 2^63-1. +func (obsClient ObsClient) SetBucketQuota(input *SetBucketQuotaInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("SetBucketQuotaInput is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucket("SetBucketQuota", HTTP_PUT, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// GetBucketQuota gets the bucket quota. +// +// You can use this API to obtain the bucket quota. Value 0 indicates that no upper limit is set for the bucket quota. +func (obsClient ObsClient) GetBucketQuota(bucketName string, extensions ...extensionOptions) (output *GetBucketQuotaOutput, err error) { + output = &GetBucketQuotaOutput{} + err = obsClient.doActionWithBucket("GetBucketQuota", HTTP_GET, bucketName, newSubResourceSerial(SubResourceQuota), output, extensions) + if err != nil { + output = nil + } + return +} + +// HeadBucket checks whether a bucket exists. +// +// You can use this API to check whether a bucket exists. +func (obsClient ObsClient) HeadBucket(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doActionWithBucket("HeadBucket", HTTP_HEAD, bucketName, defaultSerializable, output, extensions) + if err != nil { + output = nil + } + return +} + +// HeadObject checks whether an object exists. +// +// You can use this API to check whether an object exists. +func (obsClient ObsClient) HeadObject(input *HeadObjectInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("HeadObjectInput is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucketAndKey("HeadObject", HTTP_HEAD, input.Bucket, input.Key, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// GetBucketMetadata gets the metadata of a bucket. +// +// You can use this API to send a HEAD request to a bucket to obtain the bucket +// metadata such as the storage class and CORS rules (if set). +func (obsClient ObsClient) GetBucketMetadata(input *GetBucketMetadataInput, extensions ...extensionOptions) (output *GetBucketMetadataOutput, err error) { + output = &GetBucketMetadataOutput{} + err = obsClient.doActionWithBucket("GetBucketMetadata", HTTP_HEAD, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } else { + ParseGetBucketMetadataOutput(output) + } + return +} + +// SetObjectMetadata sets object metadata. 
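+//
+// A short sketch, assuming SetObjectMetadataInput exposes the Metadata map
+// defined in model.go; bucket and key names are placeholders:
+//
+//     input := &obs.SetObjectMetadataInput{}
+//     input.Bucket = "example-bucket"
+//     input.Key = "example-key"
+//     input.Metadata = map[string]string{"property1": "value1"}
+//     output, err := client.SetObjectMetadata(input)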
+func (obsClient ObsClient) SetObjectMetadata(input *SetObjectMetadataInput, extensions ...extensionOptions) (output *SetObjectMetadataOutput, err error) { + output = &SetObjectMetadataOutput{} + err = obsClient.doActionWithBucketAndKey("SetObjectMetadata", HTTP_PUT, input.Bucket, input.Key, input, output, extensions) + if err != nil { + output = nil + } else { + ParseSetObjectMetadataOutput(output) + } + return +} + +// GetBucketStorageInfo gets storage information about a bucket. +// +// You can use this API to obtain storage information about a bucket, including the +// bucket size and number of objects in the bucket. +func (obsClient ObsClient) GetBucketStorageInfo(bucketName string, extensions ...extensionOptions) (output *GetBucketStorageInfoOutput, err error) { + output = &GetBucketStorageInfoOutput{} + err = obsClient.doActionWithBucket("GetBucketStorageInfo", HTTP_GET, bucketName, newSubResourceSerial(SubResourceStorageInfo), output, extensions) + if err != nil { + output = nil + } + return +} + +func (obsClient ObsClient) getBucketLocationS3(bucketName string, extensions []extensionOptions) (output *GetBucketLocationOutput, err error) { + output = &GetBucketLocationOutput{} + var outputS3 *getBucketLocationOutputS3 + outputS3 = &getBucketLocationOutputS3{} + err = obsClient.doActionWithBucket("GetBucketLocation", HTTP_GET, bucketName, newSubResourceSerial(SubResourceLocation), outputS3, extensions) + if err != nil { + output = nil + } else { + output.BaseModel = outputS3.BaseModel + output.Location = outputS3.Location + } + return +} +func (obsClient ObsClient) getBucketLocationObs(bucketName string, extensions []extensionOptions) (output *GetBucketLocationOutput, err error) { + output = &GetBucketLocationOutput{} + var outputObs *getBucketLocationOutputObs + outputObs = &getBucketLocationOutputObs{} + err = obsClient.doActionWithBucket("GetBucketLocation", HTTP_GET, bucketName, newSubResourceSerial(SubResourceLocation), outputObs, extensions) + if err != nil { + output = nil + } else { + output.BaseModel = outputObs.BaseModel + output.Location = outputObs.Location + } + return +} + +// GetBucketLocation gets the location of a bucket. +// +// You can use this API to obtain the bucket location. +func (obsClient ObsClient) GetBucketLocation(bucketName string, extensions ...extensionOptions) (output *GetBucketLocationOutput, err error) { + if obsClient.conf.signature == SignatureObs { + return obsClient.getBucketLocationObs(bucketName, extensions) + } + return obsClient.getBucketLocationS3(bucketName, extensions) +} + +// SetBucketAcl sets the bucket ACL. +// +// You can use this API to set the ACL for a bucket. 
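+//
+// A minimal sketch, assuming the canned-ACL constant AclPrivate from const.go;
+// the bucket name is a placeholder:
+//
+//     input := &obs.SetBucketAclInput{}
+//     input.Bucket = "example-bucket"
+//     input.ACL = obs.AclPrivate
+//     output, err := client.SetBucketAcl(input)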
+func (obsClient ObsClient) SetBucketAcl(input *SetBucketAclInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("SetBucketAclInput is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucket("SetBucketAcl", HTTP_PUT, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } + return +} +func (obsClient ObsClient) getBucketACLObs(bucketName string, extensions []extensionOptions) (output *GetBucketAclOutput, err error) { + output = &GetBucketAclOutput{} + var outputObs *getBucketACLOutputObs + outputObs = &getBucketACLOutputObs{} + err = obsClient.doActionWithBucket("GetBucketAcl", HTTP_GET, bucketName, newSubResourceSerial(SubResourceAcl), outputObs, extensions) + if err != nil { + output = nil + } else { + output.BaseModel = outputObs.BaseModel + output.Owner = outputObs.Owner + output.Grants = make([]Grant, 0, len(outputObs.Grants)) + for _, valGrant := range outputObs.Grants { + tempOutput := Grant{} + tempOutput.Delivered = valGrant.Delivered + tempOutput.Permission = valGrant.Permission + tempOutput.Grantee.DisplayName = valGrant.Grantee.DisplayName + tempOutput.Grantee.ID = valGrant.Grantee.ID + tempOutput.Grantee.Type = valGrant.Grantee.Type + tempOutput.Grantee.URI = GroupAllUsers + + output.Grants = append(output.Grants, tempOutput) + } + } + return +} + +// GetBucketAcl gets the bucket ACL. +// +// You can use this API to obtain a bucket ACL. +func (obsClient ObsClient) GetBucketAcl(bucketName string, extensions ...extensionOptions) (output *GetBucketAclOutput, err error) { + output = &GetBucketAclOutput{} + if obsClient.conf.signature == SignatureObs { + return obsClient.getBucketACLObs(bucketName, extensions) + } + err = obsClient.doActionWithBucket("GetBucketAcl", HTTP_GET, bucketName, newSubResourceSerial(SubResourceAcl), output, extensions) + if err != nil { + output = nil + } + return +} + +// SetBucketPolicy sets the bucket policy. +// +// You can use this API to set a bucket policy. If the bucket already has a policy, the +// policy will be overwritten by the one specified in this request. +func (obsClient ObsClient) SetBucketPolicy(input *SetBucketPolicyInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("SetBucketPolicy is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucket("SetBucketPolicy", HTTP_PUT, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// GetBucketPolicy gets the bucket policy. +// +// You can use this API to obtain the policy of a bucket. +func (obsClient ObsClient) GetBucketPolicy(bucketName string, extensions ...extensionOptions) (output *GetBucketPolicyOutput, err error) { + output = &GetBucketPolicyOutput{} + err = obsClient.doActionWithBucketV2("GetBucketPolicy", HTTP_GET, bucketName, newSubResourceSerial(SubResourcePolicy), output, extensions) + if err != nil { + output = nil + } + return +} + +// DeleteBucketPolicy deletes the bucket policy. +// +// You can use this API to delete the policy of a bucket. +func (obsClient ObsClient) DeleteBucketPolicy(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doActionWithBucket("DeleteBucketPolicy", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourcePolicy), output, extensions) + if err != nil { + output = nil + } + return +} + +// SetBucketCors sets CORS rules for a bucket. 
+// +// You can use this API to set CORS rules for a bucket to allow client browsers to send cross-origin requests. +func (obsClient ObsClient) SetBucketCors(input *SetBucketCorsInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("SetBucketCorsInput is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucket("SetBucketCors", HTTP_PUT, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// GetBucketCors gets CORS rules of a bucket. +// +// You can use this API to obtain the CORS rules of a specified bucket. +func (obsClient ObsClient) GetBucketCors(bucketName string, extensions ...extensionOptions) (output *GetBucketCorsOutput, err error) { + output = &GetBucketCorsOutput{} + err = obsClient.doActionWithBucket("GetBucketCors", HTTP_GET, bucketName, newSubResourceSerial(SubResourceCors), output, extensions) + if err != nil { + output = nil + } + return +} + +// DeleteBucketCors deletes CORS rules of a bucket. +// +// You can use this API to delete the CORS rules of a specified bucket. +func (obsClient ObsClient) DeleteBucketCors(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doActionWithBucket("DeleteBucketCors", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceCors), output, extensions) + if err != nil { + output = nil + } + return +} + +// SetBucketVersioning sets the versioning status for a bucket. +// +// You can use this API to set the versioning status for a bucket. +func (obsClient ObsClient) SetBucketVersioning(input *SetBucketVersioningInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("SetBucketVersioningInput is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucket("SetBucketVersioning", HTTP_PUT, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// GetBucketVersioning gets the versioning status of a bucket. +// +// You can use this API to obtain the versioning status of a bucket. +func (obsClient ObsClient) GetBucketVersioning(bucketName string, extensions ...extensionOptions) (output *GetBucketVersioningOutput, err error) { + output = &GetBucketVersioningOutput{} + err = obsClient.doActionWithBucket("GetBucketVersioning", HTTP_GET, bucketName, newSubResourceSerial(SubResourceVersioning), output, extensions) + if err != nil { + output = nil + } + return +} + +// SetBucketWebsiteConfiguration sets website hosting for a bucket. +// +// You can use this API to set website hosting for a bucket. +func (obsClient ObsClient) SetBucketWebsiteConfiguration(input *SetBucketWebsiteConfigurationInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("SetBucketWebsiteConfigurationInput is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucket("SetBucketWebsiteConfiguration", HTTP_PUT, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// GetBucketWebsiteConfiguration gets the website hosting settings of a bucket. +// +// You can use this API to obtain the website hosting settings of a bucket. 
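+//
+// For example (placeholder bucket name):
+//
+//     output, err := client.GetBucketWebsiteConfiguration("example-bucket")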
+func (obsClient ObsClient) GetBucketWebsiteConfiguration(bucketName string, extensions ...extensionOptions) (output *GetBucketWebsiteConfigurationOutput, err error) { + output = &GetBucketWebsiteConfigurationOutput{} + err = obsClient.doActionWithBucket("GetBucketWebsiteConfiguration", HTTP_GET, bucketName, newSubResourceSerial(SubResourceWebsite), output, extensions) + if err != nil { + output = nil + } + return +} + +// DeleteBucketWebsiteConfiguration deletes the website hosting settings of a bucket. +// +// You can use this API to delete the website hosting settings of a bucket. +func (obsClient ObsClient) DeleteBucketWebsiteConfiguration(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doActionWithBucket("DeleteBucketWebsiteConfiguration", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceWebsite), output, extensions) + if err != nil { + output = nil + } + return +} + +// SetBucketLoggingConfiguration sets the bucket logging. +// +// You can use this API to configure access logging for a bucket. +func (obsClient ObsClient) SetBucketLoggingConfiguration(input *SetBucketLoggingConfigurationInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("SetBucketLoggingConfigurationInput is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucket("SetBucketLoggingConfiguration", HTTP_PUT, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// GetBucketLoggingConfiguration gets the logging settings of a bucket. +// +// You can use this API to obtain the access logging settings of a bucket. +func (obsClient ObsClient) GetBucketLoggingConfiguration(bucketName string, extensions ...extensionOptions) (output *GetBucketLoggingConfigurationOutput, err error) { + output = &GetBucketLoggingConfigurationOutput{} + err = obsClient.doActionWithBucket("GetBucketLoggingConfiguration", HTTP_GET, bucketName, newSubResourceSerial(SubResourceLogging), output, extensions) + if err != nil { + output = nil + } + return +} + +// SetBucketLifecycleConfiguration sets lifecycle rules for a bucket. +// +// You can use this API to set lifecycle rules for a bucket, to periodically transit +// storage classes of objects and delete objects in the bucket. +func (obsClient ObsClient) SetBucketLifecycleConfiguration(input *SetBucketLifecycleConfigurationInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("SetBucketLifecycleConfigurationInput is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucket("SetBucketLifecycleConfiguration", HTTP_PUT, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// GetBucketLifecycleConfiguration gets lifecycle rules of a bucket. +// +// You can use this API to obtain the lifecycle rules of a bucket. +func (obsClient ObsClient) GetBucketLifecycleConfiguration(bucketName string, extensions ...extensionOptions) (output *GetBucketLifecycleConfigurationOutput, err error) { + output = &GetBucketLifecycleConfigurationOutput{} + err = obsClient.doActionWithBucket("GetBucketLifecycleConfiguration", HTTP_GET, bucketName, newSubResourceSerial(SubResourceLifecycle), output, extensions) + if err != nil { + output = nil + } + return +} + +// DeleteBucketLifecycleConfiguration deletes lifecycle rules of a bucket. +// +// You can use this API to delete all lifecycle rules of a bucket. 
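+//
+// For example (placeholder bucket name):
+//
+//     output, err := client.DeleteBucketLifecycleConfiguration("example-bucket")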
+func (obsClient ObsClient) DeleteBucketLifecycleConfiguration(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doActionWithBucket("DeleteBucketLifecycleConfiguration", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceLifecycle), output, extensions) + if err != nil { + output = nil + } + return +} + +// SetBucketTagging sets bucket tags. +// +// You can use this API to set bucket tags. +func (obsClient ObsClient) SetBucketTagging(input *SetBucketTaggingInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("SetBucketTaggingInput is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucket("SetBucketTagging", HTTP_PUT, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// GetBucketTagging gets bucket tags. +// +// You can use this API to obtain the tags of a specified bucket. +func (obsClient ObsClient) GetBucketTagging(bucketName string, extensions ...extensionOptions) (output *GetBucketTaggingOutput, err error) { + output = &GetBucketTaggingOutput{} + err = obsClient.doActionWithBucket("GetBucketTagging", HTTP_GET, bucketName, newSubResourceSerial(SubResourceTagging), output, extensions) + if err != nil { + output = nil + } + return +} + +// DeleteBucketTagging deletes bucket tags. +// +// You can use this API to delete the tags of a specified bucket. +func (obsClient ObsClient) DeleteBucketTagging(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doActionWithBucket("DeleteBucketTagging", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceTagging), output, extensions) + if err != nil { + output = nil + } + return +} + +// SetBucketNotification sets event notification for a bucket. +// +// You can use this API to configure event notification for a bucket. You will be notified of all +// specified operations performed on the bucket. +func (obsClient ObsClient) SetBucketNotification(input *SetBucketNotificationInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("SetBucketNotificationInput is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucket("SetBucketNotification", HTTP_PUT, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// GetBucketNotification gets event notification settings of a bucket. +// +// You can use this API to obtain the event notification configuration of a bucket. 
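+//
+// For example (placeholder bucket name):
+//
+//     output, err := client.GetBucketNotification("example-bucket")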
+func (obsClient ObsClient) GetBucketNotification(bucketName string, extensions ...extensionOptions) (output *GetBucketNotificationOutput, err error) { + if obsClient.conf.signature != SignatureObs { + return obsClient.getBucketNotificationS3(bucketName, extensions) + } + output = &GetBucketNotificationOutput{} + err = obsClient.doActionWithBucket("GetBucketNotification", HTTP_GET, bucketName, newSubResourceSerial(SubResourceNotification), output, extensions) + if err != nil { + output = nil + } + return +} + +func (obsClient ObsClient) getBucketNotificationS3(bucketName string, extensions []extensionOptions) (output *GetBucketNotificationOutput, err error) { + outputS3 := &getBucketNotificationOutputS3{} + err = obsClient.doActionWithBucket("GetBucketNotification", HTTP_GET, bucketName, newSubResourceSerial(SubResourceNotification), outputS3, extensions) + if err != nil { + return nil, err + } + + output = &GetBucketNotificationOutput{} + output.BaseModel = outputS3.BaseModel + topicConfigurations := make([]TopicConfiguration, 0, len(outputS3.TopicConfigurations)) + for _, topicConfigurationS3 := range outputS3.TopicConfigurations { + topicConfiguration := TopicConfiguration{} + topicConfiguration.ID = topicConfigurationS3.ID + topicConfiguration.Topic = topicConfigurationS3.Topic + topicConfiguration.FilterRules = topicConfigurationS3.FilterRules + + events := make([]EventType, 0, len(topicConfigurationS3.Events)) + for _, event := range topicConfigurationS3.Events { + events = append(events, ParseStringToEventType(event)) + } + topicConfiguration.Events = events + topicConfigurations = append(topicConfigurations, topicConfiguration) + } + output.TopicConfigurations = topicConfigurations + return +} + +// DeleteObject deletes an object. +// +// You can use this API to delete an object from a specified bucket. +func (obsClient ObsClient) DeleteObject(input *DeleteObjectInput, extensions ...extensionOptions) (output *DeleteObjectOutput, err error) { + if input == nil { + return nil, errors.New("DeleteObjectInput is nil") + } + output = &DeleteObjectOutput{} + err = obsClient.doActionWithBucketAndKey("DeleteObject", HTTP_DELETE, input.Bucket, input.Key, input, output, extensions) + if err != nil { + output = nil + } else { + ParseDeleteObjectOutput(output) + } + return +} + +// DeleteObjects deletes objects in a batch. +// +// You can use this API to batch delete objects from a specified bucket. +func (obsClient ObsClient) DeleteObjects(input *DeleteObjectsInput, extensions ...extensionOptions) (output *DeleteObjectsOutput, err error) { + if input == nil { + return nil, errors.New("DeleteObjectsInput is nil") + } + output = &DeleteObjectsOutput{} + err = obsClient.doActionWithBucket("DeleteObjects", HTTP_POST, input.Bucket, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// SetObjectAcl sets ACL for an object. +// +// You can use this API to set the ACL for an object in a specified bucket. +func (obsClient ObsClient) SetObjectAcl(input *SetObjectAclInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("SetObjectAclInput is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucketAndKey("SetObjectAcl", HTTP_PUT, input.Bucket, input.Key, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// GetObjectAcl gets the ACL of an object. +// +// You can use this API to obtain the ACL of an object in a specified bucket. 
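+//
+// A minimal sketch with placeholder names:
+//
+//     input := &obs.GetObjectAclInput{}
+//     input.Bucket = "example-bucket"
+//     input.Key = "example-key"
+//     output, err := client.GetObjectAcl(input)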
+func (obsClient ObsClient) GetObjectAcl(input *GetObjectAclInput, extensions ...extensionOptions) (output *GetObjectAclOutput, err error) { + if input == nil { + return nil, errors.New("GetObjectAclInput is nil") + } + output = &GetObjectAclOutput{} + err = obsClient.doActionWithBucketAndKey("GetObjectAcl", HTTP_GET, input.Bucket, input.Key, input, output, extensions) + if err != nil { + output = nil + } else { + if versionID, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok { + output.VersionId = versionID[0] + } + } + return +} + +// RestoreObject restores an object. +func (obsClient ObsClient) RestoreObject(input *RestoreObjectInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("RestoreObjectInput is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucketAndKey("RestoreObject", HTTP_POST, input.Bucket, input.Key, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// GetObjectMetadata gets object metadata. +// +// You can use this API to send a HEAD request to the object of a specified bucket to obtain its metadata. +func (obsClient ObsClient) GetObjectMetadata(input *GetObjectMetadataInput, extensions ...extensionOptions) (output *GetObjectMetadataOutput, err error) { + if input == nil { + return nil, errors.New("GetObjectMetadataInput is nil") + } + output = &GetObjectMetadataOutput{} + err = obsClient.doActionWithBucketAndKey("GetObjectMetadata", HTTP_HEAD, input.Bucket, input.Key, input, output, extensions) + if err != nil { + output = nil + } else { + ParseGetObjectMetadataOutput(output) + } + return +} + +// GetObject downloads object. +// +// You can use this API to download an object in a specified bucket. +func (obsClient ObsClient) GetObject(input *GetObjectInput, extensions ...extensionOptions) (output *GetObjectOutput, err error) { + if input == nil { + return nil, errors.New("GetObjectInput is nil") + } + output = &GetObjectOutput{} + err = obsClient.doActionWithBucketAndKey("GetObject", HTTP_GET, input.Bucket, input.Key, input, output, extensions) + if err != nil { + output = nil + } else { + ParseGetObjectOutput(output) + } + return +} + +// PutObject uploads an object to the specified bucket. 
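+//
+// A minimal sketch; the bucket, key and content are placeholders. When
+// ContentType is left empty it is inferred from the key suffix, and a
+// *strings.Reader body is treated as repeatable for retries, per the type
+// check below:
+//
+//     input := &obs.PutObjectInput{}
+//     input.Bucket = "example-bucket"
+//     input.Key = "example/hello.txt"
+//     input.Body = strings.NewReader("Hello OBS")
+//     output, err := client.PutObject(input)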
+func (obsClient ObsClient) PutObject(input *PutObjectInput, extensions ...extensionOptions) (output *PutObjectOutput, err error) { + if input == nil { + return nil, errors.New("PutObjectInput is nil") + } + + if input.ContentType == "" && input.Key != "" { + if contentType, ok := mimeTypes[strings.ToLower(input.Key[strings.LastIndex(input.Key, ".")+1:])]; ok { + input.ContentType = contentType + } + } + output = &PutObjectOutput{} + var repeatable bool + if input.Body != nil { + _, repeatable = input.Body.(*strings.Reader) + if input.ContentLength > 0 { + input.Body = &readerWrapper{reader: input.Body, totalCount: input.ContentLength} + } + } + if repeatable { + err = obsClient.doActionWithBucketAndKey("PutObject", HTTP_PUT, input.Bucket, input.Key, input, output, extensions) + } else { + err = obsClient.doActionWithBucketAndKeyUnRepeatable("PutObject", HTTP_PUT, input.Bucket, input.Key, input, output, extensions) + } + if err != nil { + output = nil + } else { + ParsePutObjectOutput(output) + } + return +} + +func (obsClient ObsClient) getContentType(input *PutObjectInput, sourceFile string) (contentType string) { + if contentType, ok := mimeTypes[strings.ToLower(input.Key[strings.LastIndex(input.Key, ".")+1:])]; ok { + return contentType + } + if contentType, ok := mimeTypes[strings.ToLower(sourceFile[strings.LastIndex(sourceFile, ".")+1:])]; ok { + return contentType + } + return +} + +func (obsClient ObsClient) isGetContentType(input *PutObjectInput) bool { + if input.ContentType == "" && input.Key != "" { + return true + } + return false +} + +// PutFile uploads a file to the specified bucket. +func (obsClient ObsClient) PutFile(input *PutFileInput, extensions ...extensionOptions) (output *PutObjectOutput, err error) { + if input == nil { + return nil, errors.New("PutFileInput is nil") + } + + var body io.Reader + sourceFile := strings.TrimSpace(input.SourceFile) + if sourceFile != "" { + fd, _err := os.Open(sourceFile) + if _err != nil { + err = _err + return nil, err + } + defer func() { + errMsg := fd.Close() + if errMsg != nil { + doLog(LEVEL_WARN, "Failed to close file with reason: %v", errMsg) + } + }() + + stat, _err := fd.Stat() + if _err != nil { + err = _err + return nil, err + } + fileReaderWrapper := &fileReaderWrapper{filePath: sourceFile} + fileReaderWrapper.reader = fd + if input.ContentLength > 0 { + if input.ContentLength > stat.Size() { + input.ContentLength = stat.Size() + } + fileReaderWrapper.totalCount = input.ContentLength + } else { + fileReaderWrapper.totalCount = stat.Size() + } + body = fileReaderWrapper + } + + _input := &PutObjectInput{} + _input.PutObjectBasicInput = input.PutObjectBasicInput + _input.Body = body + + if obsClient.isGetContentType(_input) { + _input.ContentType = obsClient.getContentType(_input, sourceFile) + } + + output = &PutObjectOutput{} + err = obsClient.doActionWithBucketAndKey("PutFile", HTTP_PUT, _input.Bucket, _input.Key, _input, output, extensions) + if err != nil { + output = nil + } else { + ParsePutObjectOutput(output) + } + return +} + +// CopyObject creates a copy for an existing object. +// +// You can use this API to create a copy for an object in a specified bucket. 
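+//
+// A minimal sketch with placeholder names; CopySourceBucket and CopySourceKey
+// must be non-empty, as validated below:
+//
+//     input := &obs.CopyObjectInput{}
+//     input.Bucket = "target-bucket"
+//     input.Key = "target-key"
+//     input.CopySourceBucket = "source-bucket"
+//     input.CopySourceKey = "source-key"
+//     output, err := client.CopyObject(input)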
+func (obsClient ObsClient) CopyObject(input *CopyObjectInput, extensions ...extensionOptions) (output *CopyObjectOutput, err error) { + if input == nil { + return nil, errors.New("CopyObjectInput is nil") + } + + if strings.TrimSpace(input.CopySourceBucket) == "" { + return nil, errors.New("Source bucket is empty") + } + if strings.TrimSpace(input.CopySourceKey) == "" { + return nil, errors.New("Source key is empty") + } + + output = &CopyObjectOutput{} + err = obsClient.doActionWithBucketAndKey("CopyObject", HTTP_PUT, input.Bucket, input.Key, input, output, extensions) + if err != nil { + output = nil + } else { + ParseCopyObjectOutput(output) + } + return +} + +// AbortMultipartUpload aborts a multipart upload in a specified bucket by using the multipart upload ID. +func (obsClient ObsClient) AbortMultipartUpload(input *AbortMultipartUploadInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("AbortMultipartUploadInput is nil") + } + if input.UploadId == "" { + return nil, errors.New("UploadId is empty") + } + output = &BaseModel{} + err = obsClient.doActionWithBucketAndKey("AbortMultipartUpload", HTTP_DELETE, input.Bucket, input.Key, input, output, extensions) + if err != nil { + output = nil + } + return +} + +// InitiateMultipartUpload initializes a multipart upload. +func (obsClient ObsClient) InitiateMultipartUpload(input *InitiateMultipartUploadInput, extensions ...extensionOptions) (output *InitiateMultipartUploadOutput, err error) { + if input == nil { + return nil, errors.New("InitiateMultipartUploadInput is nil") + } + + if input.ContentType == "" && input.Key != "" { + if contentType, ok := mimeTypes[strings.ToLower(input.Key[strings.LastIndex(input.Key, ".")+1:])]; ok { + input.ContentType = contentType + } + } + + output = &InitiateMultipartUploadOutput{} + err = obsClient.doActionWithBucketAndKey("InitiateMultipartUpload", HTTP_POST, input.Bucket, input.Key, input, output, extensions) + if err != nil { + output = nil + } else { + ParseInitiateMultipartUploadOutput(output) + } + return +} + +// UploadPart uploads a part to a specified bucket by using a specified multipart upload ID. +// +// After a multipart upload is initialized, you can use this API to upload a part to a specified bucket +// by using the multipart upload ID. Except for the last uploaded part whose size ranges from 0 to 5 GB, +// sizes of the other parts range from 100 KB to 5 GB. The upload part ID ranges from 1 to 10000. 
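+//
+// A sketch of uploading one part from a local file; all names and the upload
+// ID are placeholders. Per the handling below, Offset falls back to 0 when out
+// of range and PartSize defaults to the remaining file size when unset:
+//
+//     input := &obs.UploadPartInput{}
+//     input.Bucket = "example-bucket"
+//     input.Key = "example-key"
+//     input.UploadId = "id-from-InitiateMultipartUpload"
+//     input.PartNumber = 1
+//     input.SourceFile = "/path/to/local/file"
+//     input.Offset = 0
+//     input.PartSize = 5 * 1024 * 1024
+//     output, err := client.UploadPart(input)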
+func (obsClient ObsClient) UploadPart(_input *UploadPartInput, extensions ...extensionOptions) (output *UploadPartOutput, err error) { + if _input == nil { + return nil, errors.New("UploadPartInput is nil") + } + + if _input.UploadId == "" { + return nil, errors.New("UploadId is empty") + } + + input := &UploadPartInput{} + input.Bucket = _input.Bucket + input.Key = _input.Key + input.PartNumber = _input.PartNumber + input.UploadId = _input.UploadId + input.ContentMD5 = _input.ContentMD5 + input.SourceFile = _input.SourceFile + input.Offset = _input.Offset + input.PartSize = _input.PartSize + input.SseHeader = _input.SseHeader + input.Body = _input.Body + + output = &UploadPartOutput{} + var repeatable bool + if input.Body != nil { + _, repeatable = input.Body.(*strings.Reader) + if _, ok := input.Body.(*readerWrapper); !ok && input.PartSize > 0 { + input.Body = &readerWrapper{reader: input.Body, totalCount: input.PartSize} + } + } else if sourceFile := strings.TrimSpace(input.SourceFile); sourceFile != "" { + fd, _err := os.Open(sourceFile) + if _err != nil { + err = _err + return nil, err + } + defer func() { + errMsg := fd.Close() + if errMsg != nil { + doLog(LEVEL_WARN, "Failed to close file with reason: %v", errMsg) + } + }() + + stat, _err := fd.Stat() + if _err != nil { + err = _err + return nil, err + } + fileSize := stat.Size() + fileReaderWrapper := &fileReaderWrapper{filePath: sourceFile} + fileReaderWrapper.reader = fd + + if input.Offset < 0 || input.Offset > fileSize { + input.Offset = 0 + } + + if input.PartSize <= 0 || input.PartSize > (fileSize-input.Offset) { + input.PartSize = fileSize - input.Offset + } + fileReaderWrapper.totalCount = input.PartSize + if _, err = fd.Seek(input.Offset, io.SeekStart); err != nil { + return nil, err + } + input.Body = fileReaderWrapper + repeatable = true + } + if repeatable { + err = obsClient.doActionWithBucketAndKey("UploadPart", HTTP_PUT, input.Bucket, input.Key, input, output, extensions) + } else { + err = obsClient.doActionWithBucketAndKeyUnRepeatable("UploadPart", HTTP_PUT, input.Bucket, input.Key, input, output, extensions) + } + if err != nil { + output = nil + } else { + ParseUploadPartOutput(output) + output.PartNumber = input.PartNumber + } + return +} + +// CompleteMultipartUpload combines the uploaded parts in a specified bucket by using the multipart upload ID. +func (obsClient ObsClient) CompleteMultipartUpload(input *CompleteMultipartUploadInput, extensions ...extensionOptions) (output *CompleteMultipartUploadOutput, err error) { + if input == nil { + return nil, errors.New("CompleteMultipartUploadInput is nil") + } + + if input.UploadId == "" { + return nil, errors.New("UploadId is empty") + } + + var parts partSlice = input.Parts + sort.Sort(parts) + + output = &CompleteMultipartUploadOutput{} + err = obsClient.doActionWithBucketAndKey("CompleteMultipartUpload", HTTP_POST, input.Bucket, input.Key, input, output, extensions) + if err != nil { + output = nil + } else { + ParseCompleteMultipartUploadOutput(output) + } + return +} + +// ListParts lists the uploaded parts in a bucket by using the multipart upload ID. 
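+//
+// A minimal sketch with placeholder names; UploadId must be non-empty:
+//
+//     input := &obs.ListPartsInput{}
+//     input.Bucket = "example-bucket"
+//     input.Key = "example-key"
+//     input.UploadId = "id-from-InitiateMultipartUpload"
+//     output, err := client.ListParts(input)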
+func (obsClient ObsClient) ListParts(input *ListPartsInput, extensions ...extensionOptions) (output *ListPartsOutput, err error) {
+	if input == nil {
+		return nil, errors.New("ListPartsInput is nil")
+	}
+	if input.UploadId == "" {
+		return nil, errors.New("UploadId is empty")
+	}
+	output = &ListPartsOutput{}
+	err = obsClient.doActionWithBucketAndKey("ListParts", HTTP_GET, input.Bucket, input.Key, input, output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// CopyPart copies a part to a specified bucket by using a specified multipart upload ID.
+//
+// After a multipart upload is initialized, you can use this API to copy a part to a specified bucket by using the multipart upload ID.
+func (obsClient ObsClient) CopyPart(input *CopyPartInput, extensions ...extensionOptions) (output *CopyPartOutput, err error) {
+	if input == nil {
+		return nil, errors.New("CopyPartInput is nil")
+	}
+	if input.UploadId == "" {
+		return nil, errors.New("UploadId is empty")
+	}
+	if strings.TrimSpace(input.CopySourceBucket) == "" {
+		return nil, errors.New("Source bucket is empty")
+	}
+	if strings.TrimSpace(input.CopySourceKey) == "" {
+		return nil, errors.New("Source key is empty")
+	}
+
+	output = &CopyPartOutput{}
+	err = obsClient.doActionWithBucketAndKey("CopyPart", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
+	if err != nil {
+		output = nil
+	} else {
+		ParseCopyPartOutput(output)
+		output.PartNumber = input.PartNumber
+	}
+	return
+}
+
+// SetBucketRequestPayment sets the requester-pays setting for a bucket.
+func (obsClient ObsClient) SetBucketRequestPayment(input *SetBucketRequestPaymentInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("SetBucketRequestPaymentInput is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("SetBucketRequestPayment", HTTP_PUT, input.Bucket, input, output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// GetBucketRequestPayment gets the requester-pays setting of a bucket.
+func (obsClient ObsClient) GetBucketRequestPayment(bucketName string, extensions ...extensionOptions) (output *GetBucketRequestPaymentOutput, err error) {
+	output = &GetBucketRequestPaymentOutput{}
+	err = obsClient.doActionWithBucket("GetBucketRequestPayment", HTTP_GET, bucketName, newSubResourceSerial(SubResourceRequestPayment), output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// UploadFile performs a resumable upload.
+//
+// This API is an encapsulated and enhanced version of multipart upload, and aims to eliminate large file
+// upload failures caused by poor network conditions and program breakdowns.
+func (obsClient ObsClient) UploadFile(input *UploadFileInput, extensions ...extensionOptions) (output *CompleteMultipartUploadOutput, err error) {
+	if input.EnableCheckpoint && input.CheckpointFile == "" {
+		input.CheckpointFile = input.UploadFile + ".uploadfile_record"
+	}
+
+	if input.TaskNum <= 0 {
+		input.TaskNum = 1
+	}
+	if input.PartSize < MIN_PART_SIZE {
+		input.PartSize = MIN_PART_SIZE
+	} else if input.PartSize > MAX_PART_SIZE {
+		input.PartSize = MAX_PART_SIZE
+	}
+
+	output, err = obsClient.resumeUpload(input, extensions)
+	return
+}
+
+// DownloadFile performs a resumable download.
+//
+// This API is an encapsulated and enhanced version of partial download, and aims to eliminate large file
+// download failures caused by poor network conditions and program breakdowns.
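+//
+// A minimal usage sketch of a checkpoint-enabled download (bucket, key, file
+// path and task number are placeholder values, not part of this SDK):
+//
+//	input := &DownloadFileInput{}
+//	input.Bucket = "examplebucket"
+//	input.Key = "example/object"
+//	input.DownloadFile = "/tmp/object.bin"
+//	input.EnableCheckpoint = true // checkpoint file defaults to DownloadFile + ".downloadfile_record"
+//	input.PartSize = DEFAULT_PART_SIZE
+//	input.TaskNum = 4
+//	output, err := obsClient.DownloadFile(input)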
+func (obsClient ObsClient) DownloadFile(input *DownloadFileInput, extensions ...extensionOptions) (output *GetObjectMetadataOutput, err error) { + if input.DownloadFile == "" { + input.DownloadFile = input.Key + } + + if input.EnableCheckpoint && input.CheckpointFile == "" { + input.CheckpointFile = input.DownloadFile + ".downloadfile_record" + } + + if input.TaskNum <= 0 { + input.TaskNum = 1 + } + if input.PartSize <= 0 { + input.PartSize = DEFAULT_PART_SIZE + } + + output, err = obsClient.resumeDownload(input, extensions) + return +} + +// SetBucketFetchPolicy sets the bucket fetch policy. +// +// You can use this API to set a bucket fetch policy. +func (obsClient ObsClient) SetBucketFetchPolicy(input *SetBucketFetchPolicyInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("SetBucketFetchPolicyInput is nil") + } + if strings.TrimSpace(string(input.Status)) == "" { + return nil, errors.New("Fetch policy status is empty") + } + if strings.TrimSpace(input.Agency) == "" { + return nil, errors.New("Fetch policy agency is empty") + } + output = &BaseModel{} + err = obsClient.doActionWithBucketAndKey("SetBucketFetchPolicy", HTTP_PUT, input.Bucket, string(objectKeyExtensionPolicy), input, output, extensions) + if err != nil { + output = nil + } + return +} + +// GetBucketFetchPolicy gets the bucket fetch policy. +// +// You can use this API to obtain the fetch policy of a bucket. +func (obsClient ObsClient) GetBucketFetchPolicy(input *GetBucketFetchPolicyInput, extensions ...extensionOptions) (output *GetBucketFetchPolicyOutput, err error) { + if input == nil { + return nil, errors.New("GetBucketFetchPolicyInput is nil") + } + output = &GetBucketFetchPolicyOutput{} + err = obsClient.doActionWithBucketAndKeyV2("GetBucketFetchPolicy", HTTP_GET, input.Bucket, string(objectKeyExtensionPolicy), input, output, extensions) + if err != nil { + output = nil + } + return +} + +// DeleteBucketFetchPolicy deletes the bucket fetch policy. +// +// You can use this API to delete the fetch policy of a bucket. +func (obsClient ObsClient) DeleteBucketFetchPolicy(input *DeleteBucketFetchPolicyInput, extensions ...extensionOptions) (output *BaseModel, err error) { + if input == nil { + return nil, errors.New("DeleteBucketFetchPolicyInput is nil") + } + output = &BaseModel{} + err = obsClient.doActionWithBucketAndKey("DeleteBucketFetchPolicy", HTTP_DELETE, input.Bucket, string(objectKeyExtensionPolicy), input, output, extensions) + if err != nil { + output = nil + } + return +} + +// SetBucketFetchJob sets the bucket fetch job. +// +// You can use this API to set a bucket fetch job. +func (obsClient ObsClient) SetBucketFetchJob(input *SetBucketFetchJobInput, extensions ...extensionOptions) (output *SetBucketFetchJobOutput, err error) { + if input == nil { + return nil, errors.New("SetBucketFetchJobInput is nil") + } + if strings.TrimSpace(input.URL) == "" { + return nil, errors.New("URL is empty") + } + output = &SetBucketFetchJobOutput{} + err = obsClient.doActionWithBucketAndKeyV2("SetBucketFetchJob", HTTP_POST, input.Bucket, string(objectKeyAsyncFetchJob), input, output, extensions) + if err != nil { + output = nil + } + return +} + +// GetBucketFetchJob gets the bucket fetch job. +// +// You can use this API to obtain the fetch job of a bucket. 
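+//
+// A minimal usage sketch (the bucket name is a placeholder and jobID is
+// assumed to come from a prior SetBucketFetchJob call):
+//
+//	input := &GetBucketFetchJobInput{}
+//	input.Bucket = "examplebucket"
+//	input.JobID = jobID
+//	output, err := obsClient.GetBucketFetchJob(input)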
+func (obsClient ObsClient) GetBucketFetchJob(input *GetBucketFetchJobInput, extensions ...extensionOptions) (output *GetBucketFetchJobOutput, err error) {
+	if input == nil {
+		return nil, errors.New("GetBucketFetchJobInput is nil")
+	}
+	if strings.TrimSpace(input.JobID) == "" {
+		return nil, errors.New("JobID is empty")
+	}
+	output = &GetBucketFetchJobOutput{}
+	err = obsClient.doActionWithBucketAndKeyV2("GetBucketFetchJob", HTTP_GET, input.Bucket, string(objectKeyAsyncFetchJob)+"/"+input.JobID, input, output, extensions)
+	if err != nil {
+		output = nil
+	}
+	return
+}
diff --git a/modules/obs/conf.go b/modules/obs/conf.go
new file mode 100755
index 000000000..4b8525bfb
--- /dev/null
+++ b/modules/obs/conf.go
@@ -0,0 +1,471 @@
+// Copyright 2019 Huawei Technologies Co.,Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+//nolint:golint, unused
+package obs
+
+import (
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"net"
+	"net/http"
+	"net/url"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+)
+
+type securityProvider struct {
+	ak            string
+	sk            string
+	securityToken string
+}
+
+type urlHolder struct {
+	scheme string
+	host   string
+	port   int
+}
+
+type config struct {
+	securityProvider *securityProvider
+	urlHolder        *urlHolder
+	pathStyle        bool
+	cname            bool
+	sslVerify        bool
+	endpoint         string
+	signature        SignatureType
+	region           string
+	connectTimeout   int
+	socketTimeout    int
+	headerTimeout    int
+	idleConnTimeout  int
+	finalTimeout     int
+	maxRetryCount    int
+	proxyURL         string
+	maxConnsPerHost  int
+	pemCerts         []byte
+	transport        *http.Transport
+	ctx              context.Context
+	maxRedirectCount int
+}
+
+func (conf config) String() string {
+	return fmt.Sprintf("[endpoint:%s, signature:%s, pathStyle:%v, region:%s"+
+		"\nconnectTimeout:%d, socketTimeout:%d, headerTimeout:%d, idleConnTimeout:%d"+
+		"\nmaxRetryCount:%d, maxConnsPerHost:%d, sslVerify:%v, maxRedirectCount:%d]",
+		conf.endpoint, conf.signature, conf.pathStyle, conf.region,
+		conf.connectTimeout, conf.socketTimeout, conf.headerTimeout, conf.idleConnTimeout,
+		conf.maxRetryCount, conf.maxConnsPerHost, conf.sslVerify, conf.maxRedirectCount,
+	)
+}
+
+type configurer func(conf *config)
+
+// WithSslVerify is a wrapper for WithSslVerifyAndPemCerts.
+func WithSslVerify(sslVerify bool) configurer {
+	return WithSslVerifyAndPemCerts(sslVerify, nil)
+}
+
+// WithSslVerifyAndPemCerts is a configurer for ObsClient to set conf.sslVerify and conf.pemCerts.
+func WithSslVerifyAndPemCerts(sslVerify bool, pemCerts []byte) configurer {
+	return func(conf *config) {
+		conf.sslVerify = sslVerify
+		conf.pemCerts = pemCerts
+	}
+}
+
+// WithHeaderTimeout is a configurer for ObsClient to set the timeout period of obtaining the response headers.
+func WithHeaderTimeout(headerTimeout int) configurer {
+	return func(conf *config) {
+		conf.headerTimeout = headerTimeout
+	}
+}
+
+// WithProxyUrl is a configurer for ObsClient to set the HTTP proxy.
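+//
+// A minimal sketch, assuming the New constructor from client.go and
+// placeholder credentials, endpoint and proxy address:
+//
+//	client, err := New(ak, sk, "https://obs.example.com", WithProxyUrl("http://127.0.0.1:3128"))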
+func WithProxyUrl(proxyURL string) configurer { + return func(conf *config) { + conf.proxyURL = proxyURL + } +} + +// WithMaxConnections is a configurer for ObsClient to set the maximum number of idle HTTP connections. +func WithMaxConnections(maxConnsPerHost int) configurer { + return func(conf *config) { + conf.maxConnsPerHost = maxConnsPerHost + } +} + +// WithPathStyle is a configurer for ObsClient. +func WithPathStyle(pathStyle bool) configurer { + return func(conf *config) { + conf.pathStyle = pathStyle + } +} + +// WithSignature is a configurer for ObsClient. +func WithSignature(signature SignatureType) configurer { + return func(conf *config) { + conf.signature = signature + } +} + +// WithRegion is a configurer for ObsClient. +func WithRegion(region string) configurer { + return func(conf *config) { + conf.region = region + } +} + +// WithConnectTimeout is a configurer for ObsClient to set timeout period for establishing +// an http/https connection, in seconds. +func WithConnectTimeout(connectTimeout int) configurer { + return func(conf *config) { + conf.connectTimeout = connectTimeout + } +} + +// WithSocketTimeout is a configurer for ObsClient to set the timeout duration for transmitting data at +// the socket layer, in seconds. +func WithSocketTimeout(socketTimeout int) configurer { + return func(conf *config) { + conf.socketTimeout = socketTimeout + } +} + +// WithIdleConnTimeout is a configurer for ObsClient to set the timeout period of an idle HTTP connection +// in the connection pool, in seconds. +func WithIdleConnTimeout(idleConnTimeout int) configurer { + return func(conf *config) { + conf.idleConnTimeout = idleConnTimeout + } +} + +// WithMaxRetryCount is a configurer for ObsClient to set the maximum number of retries when an HTTP/HTTPS connection is abnormal. +func WithMaxRetryCount(maxRetryCount int) configurer { + return func(conf *config) { + conf.maxRetryCount = maxRetryCount + } +} + +// WithSecurityToken is a configurer for ObsClient to set the security token in the temporary access keys. +func WithSecurityToken(securityToken string) configurer { + return func(conf *config) { + conf.securityProvider.securityToken = securityToken + } +} + +// WithHttpTransport is a configurer for ObsClient to set the customized http Transport. +func WithHttpTransport(transport *http.Transport) configurer { + return func(conf *config) { + conf.transport = transport + } +} + +// WithRequestContext is a configurer for ObsClient to set the context for each HTTP request. +func WithRequestContext(ctx context.Context) configurer { + return func(conf *config) { + conf.ctx = ctx + } +} + +// WithCustomDomainName is a configurer for ObsClient. +func WithCustomDomainName(cname bool) configurer { + return func(conf *config) { + conf.cname = cname + } +} + +// WithMaxRedirectCount is a configurer for ObsClient to set the maximum number of times that the request is redirected. 
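+//
+// Configurers can be combined when constructing a client; a minimal sketch
+// with placeholder credentials and endpoint (New is defined in client.go):
+//
+//	client, err := New(ak, sk, "https://obs.example.com",
+//		WithMaxRedirectCount(5), WithMaxRetryCount(5))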
+func WithMaxRedirectCount(maxRedirectCount int) configurer { + return func(conf *config) { + conf.maxRedirectCount = maxRedirectCount + } +} + +func (conf *config) prepareConfig() { + if conf.connectTimeout <= 0 { + conf.connectTimeout = DEFAULT_CONNECT_TIMEOUT + } + + if conf.socketTimeout <= 0 { + conf.socketTimeout = DEFAULT_SOCKET_TIMEOUT + } + + conf.finalTimeout = conf.socketTimeout * 10 + + if conf.headerTimeout <= 0 { + conf.headerTimeout = DEFAULT_HEADER_TIMEOUT + } + + if conf.idleConnTimeout < 0 { + conf.idleConnTimeout = DEFAULT_IDLE_CONN_TIMEOUT + } + + if conf.maxRetryCount < 0 { + conf.maxRetryCount = DEFAULT_MAX_RETRY_COUNT + } + + if conf.maxConnsPerHost <= 0 { + conf.maxConnsPerHost = DEFAULT_MAX_CONN_PER_HOST + } + + if conf.maxRedirectCount < 0 { + conf.maxRedirectCount = DEFAULT_MAX_REDIRECT_COUNT + } +} + +func (conf *config) initConfigWithDefault() error { + conf.securityProvider.ak = strings.TrimSpace(conf.securityProvider.ak) + conf.securityProvider.sk = strings.TrimSpace(conf.securityProvider.sk) + conf.securityProvider.securityToken = strings.TrimSpace(conf.securityProvider.securityToken) + conf.endpoint = strings.TrimSpace(conf.endpoint) + if conf.endpoint == "" { + return errors.New("endpoint is not set") + } + + if index := strings.Index(conf.endpoint, "?"); index > 0 { + conf.endpoint = conf.endpoint[:index] + } + + for strings.LastIndex(conf.endpoint, "/") == len(conf.endpoint)-1 { + conf.endpoint = conf.endpoint[:len(conf.endpoint)-1] + } + + if conf.signature == "" { + conf.signature = DEFAULT_SIGNATURE + } + + urlHolder := &urlHolder{} + var address string + if strings.HasPrefix(conf.endpoint, "https://") { + urlHolder.scheme = "https" + address = conf.endpoint[len("https://"):] + } else if strings.HasPrefix(conf.endpoint, "http://") { + urlHolder.scheme = "http" + address = conf.endpoint[len("http://"):] + } else { + urlHolder.scheme = "https" + address = conf.endpoint + } + + addr := strings.Split(address, ":") + if len(addr) == 2 { + if port, err := strconv.Atoi(addr[1]); err == nil { + urlHolder.port = port + } + } + urlHolder.host = addr[0] + if urlHolder.port == 0 { + if urlHolder.scheme == "https" { + urlHolder.port = 443 + } else { + urlHolder.port = 80 + } + } + + if IsIP(urlHolder.host) { + conf.pathStyle = true + } + + conf.urlHolder = urlHolder + + conf.region = strings.TrimSpace(conf.region) + if conf.region == "" { + conf.region = DEFAULT_REGION + } + + conf.prepareConfig() + conf.proxyURL = strings.TrimSpace(conf.proxyURL) + return nil +} + +func (conf *config) getTransport() error { + if conf.transport == nil { + conf.transport = &http.Transport{ + Dial: func(network, addr string) (net.Conn, error) { + conn, err := net.DialTimeout(network, addr, time.Second*time.Duration(conf.connectTimeout)) + if err != nil { + return nil, err + } + return getConnDelegate(conn, conf.socketTimeout, conf.finalTimeout), nil + }, + MaxIdleConns: conf.maxConnsPerHost, + MaxIdleConnsPerHost: conf.maxConnsPerHost, + ResponseHeaderTimeout: time.Second * time.Duration(conf.headerTimeout), + IdleConnTimeout: time.Second * time.Duration(conf.idleConnTimeout), + } + + if conf.proxyURL != "" { + proxyURL, err := url.Parse(conf.proxyURL) + if err != nil { + return err + } + conf.transport.Proxy = http.ProxyURL(proxyURL) + } + + tlsConfig := &tls.Config{InsecureSkipVerify: !conf.sslVerify} + if conf.sslVerify && conf.pemCerts != nil { + pool := x509.NewCertPool() + pool.AppendCertsFromPEM(conf.pemCerts) + tlsConfig.RootCAs = pool + } + + conf.transport.TLSClientConfig 
= tlsConfig + conf.transport.DisableCompression = true + } + + return nil +} + +func checkRedirectFunc(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse +} + +// DummyQueryEscape return the input string. +func DummyQueryEscape(s string) string { + return s +} + +func (conf *config) prepareBaseURL(bucketName string) (requestURL string, canonicalizedURL string) { + urlHolder := conf.urlHolder + if conf.cname { + requestURL = fmt.Sprintf("%s://%s:%d", urlHolder.scheme, urlHolder.host, urlHolder.port) + if conf.signature == "v4" { + canonicalizedURL = "/" + } else { + canonicalizedURL = "/" + urlHolder.host + "/" + } + } else { + if bucketName == "" { + requestURL = fmt.Sprintf("%s://%s:%d", urlHolder.scheme, urlHolder.host, urlHolder.port) + canonicalizedURL = "/" + } else { + if conf.pathStyle { + requestURL = fmt.Sprintf("%s://%s:%d/%s", urlHolder.scheme, urlHolder.host, urlHolder.port, bucketName) + canonicalizedURL = "/" + bucketName + } else { + requestURL = fmt.Sprintf("%s://%s.%s:%d", urlHolder.scheme, bucketName, urlHolder.host, urlHolder.port) + if conf.signature == "v2" || conf.signature == "OBS" { + canonicalizedURL = "/" + bucketName + "/" + } else { + canonicalizedURL = "/" + } + } + } + } + return +} + +func (conf *config) prepareObjectKey(escape bool, objectKey string, escapeFunc func(s string) string) (encodeObjectKey string) { + if escape { + tempKey := []rune(objectKey) + result := make([]string, 0, len(tempKey)) + for _, value := range tempKey { + if string(value) == "/" { + result = append(result, string(value)) + } else { + if string(value) == " " { + result = append(result, url.PathEscape(string(value))) + } else { + result = append(result, url.QueryEscape(string(value))) + } + } + } + encodeObjectKey = strings.Join(result, "") + } else { + encodeObjectKey = escapeFunc(objectKey) + } + return +} + +func (conf *config) prepareEscapeFunc(escape bool) (escapeFunc func(s string) string) { + if escape { + return url.QueryEscape + } + return DummyQueryEscape +} + +func (conf *config) formatUrls(bucketName, objectKey string, params map[string]string, escape bool) (requestURL string, canonicalizedURL string) { + + requestURL, canonicalizedURL = conf.prepareBaseURL(bucketName) + var escapeFunc func(s string) string + escapeFunc = conf.prepareEscapeFunc(escape) + + if objectKey != "" { + var encodeObjectKey string + encodeObjectKey = conf.prepareObjectKey(escape, objectKey, escapeFunc) + requestURL += "/" + encodeObjectKey + if !strings.HasSuffix(canonicalizedURL, "/") { + canonicalizedURL += "/" + } + canonicalizedURL += encodeObjectKey + } + + keys := make([]string, 0, len(params)) + for key := range params { + keys = append(keys, strings.TrimSpace(key)) + } + sort.Strings(keys) + i := 0 + + for index, key := range keys { + if index == 0 { + requestURL += "?" + } else { + requestURL += "&" + } + _key := url.QueryEscape(key) + requestURL += _key + + _value := params[key] + if conf.signature == "v4" { + requestURL += "=" + url.QueryEscape(_value) + } else { + if _value != "" { + requestURL += "=" + url.QueryEscape(_value) + _value = "=" + _value + } else { + _value = "" + } + lowerKey := strings.ToLower(key) + _, ok := allowedResourceParameterNames[lowerKey] + prefixHeader := HEADER_PREFIX + isObs := conf.signature == SignatureObs + if isObs { + prefixHeader = HEADER_PREFIX_OBS + } + ok = ok || strings.HasPrefix(lowerKey, prefixHeader) + if ok { + if i == 0 { + canonicalizedURL += "?" 
+ } else { + canonicalizedURL += "&" + } + canonicalizedURL += getQueryURL(_key, _value) + i++ + } + } + } + return +} + +func getQueryURL(key, value string) string { + queryURL := "" + queryURL += key + queryURL += value + return queryURL +} diff --git a/modules/obs/const.go b/modules/obs/const.go new file mode 100755 index 000000000..89f1e08eb --- /dev/null +++ b/modules/obs/const.go @@ -0,0 +1,932 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +//nolint:golint, unused +package obs + +const ( + obsSdkVersion = "3.20.9" + USER_AGENT = "obs-sdk-go/" + obsSdkVersion + HEADER_PREFIX = "x-amz-" + HEADER_PREFIX_META = "x-amz-meta-" + HEADER_PREFIX_OBS = "x-obs-" + HEADER_PREFIX_META_OBS = "x-obs-meta-" + HEADER_DATE_AMZ = "x-amz-date" + HEADER_DATE_OBS = "x-obs-date" + HEADER_STS_TOKEN_AMZ = "x-amz-security-token" + HEADER_STS_TOKEN_OBS = "x-obs-security-token" + HEADER_ACCESSS_KEY_AMZ = "AWSAccessKeyId" + PREFIX_META = "meta-" + + HEADER_CONTENT_SHA256_AMZ = "x-amz-content-sha256" + HEADER_ACL_AMZ = "x-amz-acl" + HEADER_ACL_OBS = "x-obs-acl" + HEADER_ACL = "acl" + HEADER_LOCATION_AMZ = "location" + HEADER_BUCKET_LOCATION_OBS = "bucket-location" + HEADER_COPY_SOURCE = "copy-source" + HEADER_COPY_SOURCE_RANGE = "copy-source-range" + HEADER_RANGE = "Range" + HEADER_STORAGE_CLASS = "x-default-storage-class" + HEADER_STORAGE_CLASS_OBS = "x-obs-storage-class" + HEADER_VERSION_OBS = "version" + HEADER_GRANT_READ_OBS = "grant-read" + HEADER_GRANT_WRITE_OBS = "grant-write" + HEADER_GRANT_READ_ACP_OBS = "grant-read-acp" + HEADER_GRANT_WRITE_ACP_OBS = "grant-write-acp" + HEADER_GRANT_FULL_CONTROL_OBS = "grant-full-control" + HEADER_GRANT_READ_DELIVERED_OBS = "grant-read-delivered" + HEADER_GRANT_FULL_CONTROL_DELIVERED_OBS = "grant-full-control-delivered" + HEADER_REQUEST_ID = "request-id" + HEADER_BUCKET_REGION = "bucket-region" + HEADER_ACCESS_CONRTOL_ALLOW_ORIGIN = "access-control-allow-origin" + HEADER_ACCESS_CONRTOL_ALLOW_HEADERS = "access-control-allow-headers" + HEADER_ACCESS_CONRTOL_MAX_AGE = "access-control-max-age" + HEADER_ACCESS_CONRTOL_ALLOW_METHODS = "access-control-allow-methods" + HEADER_ACCESS_CONRTOL_EXPOSE_HEADERS = "access-control-expose-headers" + HEADER_EPID_HEADERS = "epid" + HEADER_VERSION_ID = "version-id" + HEADER_COPY_SOURCE_VERSION_ID = "copy-source-version-id" + HEADER_DELETE_MARKER = "delete-marker" + HEADER_WEBSITE_REDIRECT_LOCATION = "website-redirect-location" + HEADER_METADATA_DIRECTIVE = "metadata-directive" + HEADER_EXPIRATION = "expiration" + HEADER_EXPIRES_OBS = "x-obs-expires" + HEADER_RESTORE = "restore" + HEADER_OBJECT_TYPE = "object-type" + HEADER_NEXT_APPEND_POSITION = "next-append-position" + HEADER_STORAGE_CLASS2 = "storage-class" + HEADER_CONTENT_LENGTH = "content-length" + HEADER_CONTENT_TYPE = "content-type" + HEADER_CONTENT_LANGUAGE = "content-language" + HEADER_EXPIRES = "expires" + HEADER_CACHE_CONTROL = "cache-control" + HEADER_CONTENT_DISPOSITION = "content-disposition" + HEADER_CONTENT_ENCODING = 
"content-encoding" + HEADER_AZ_REDUNDANCY = "az-redundancy" + headerOefMarker = "oef-marker" + + HEADER_ETAG = "etag" + HEADER_LASTMODIFIED = "last-modified" + + HEADER_COPY_SOURCE_IF_MATCH = "copy-source-if-match" + HEADER_COPY_SOURCE_IF_NONE_MATCH = "copy-source-if-none-match" + HEADER_COPY_SOURCE_IF_MODIFIED_SINCE = "copy-source-if-modified-since" + HEADER_COPY_SOURCE_IF_UNMODIFIED_SINCE = "copy-source-if-unmodified-since" + + HEADER_IF_MATCH = "If-Match" + HEADER_IF_NONE_MATCH = "If-None-Match" + HEADER_IF_MODIFIED_SINCE = "If-Modified-Since" + HEADER_IF_UNMODIFIED_SINCE = "If-Unmodified-Since" + + HEADER_SSEC_ENCRYPTION = "server-side-encryption-customer-algorithm" + HEADER_SSEC_KEY = "server-side-encryption-customer-key" + HEADER_SSEC_KEY_MD5 = "server-side-encryption-customer-key-MD5" + + HEADER_SSEKMS_ENCRYPTION = "server-side-encryption" + HEADER_SSEKMS_KEY = "server-side-encryption-aws-kms-key-id" + HEADER_SSEKMS_ENCRYPT_KEY_OBS = "server-side-encryption-kms-key-id" + + HEADER_SSEC_COPY_SOURCE_ENCRYPTION = "copy-source-server-side-encryption-customer-algorithm" + HEADER_SSEC_COPY_SOURCE_KEY = "copy-source-server-side-encryption-customer-key" + HEADER_SSEC_COPY_SOURCE_KEY_MD5 = "copy-source-server-side-encryption-customer-key-MD5" + + HEADER_SSEKMS_KEY_AMZ = "x-amz-server-side-encryption-aws-kms-key-id" + + HEADER_SSEKMS_KEY_OBS = "x-obs-server-side-encryption-kms-key-id" + + HEADER_SUCCESS_ACTION_REDIRECT = "success_action_redirect" + + HEADER_DATE_CAMEL = "Date" + HEADER_HOST_CAMEL = "Host" + HEADER_HOST = "host" + HEADER_AUTH_CAMEL = "Authorization" + HEADER_MD5_CAMEL = "Content-MD5" + HEADER_LOCATION_CAMEL = "Location" + HEADER_CONTENT_LENGTH_CAMEL = "Content-Length" + HEADER_CONTENT_TYPE_CAML = "Content-Type" + HEADER_USER_AGENT_CAMEL = "User-Agent" + HEADER_ORIGIN_CAMEL = "Origin" + HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL = "Access-Control-Request-Headers" + HEADER_CACHE_CONTROL_CAMEL = "Cache-Control" + HEADER_CONTENT_DISPOSITION_CAMEL = "Content-Disposition" + HEADER_CONTENT_ENCODING_CAMEL = "Content-Encoding" + HEADER_CONTENT_LANGUAGE_CAMEL = "Content-Language" + HEADER_EXPIRES_CAMEL = "Expires" + + PARAM_VERSION_ID = "versionId" + PARAM_RESPONSE_CONTENT_TYPE = "response-content-type" + PARAM_RESPONSE_CONTENT_LANGUAGE = "response-content-language" + PARAM_RESPONSE_EXPIRES = "response-expires" + PARAM_RESPONSE_CACHE_CONTROL = "response-cache-control" + PARAM_RESPONSE_CONTENT_DISPOSITION = "response-content-disposition" + PARAM_RESPONSE_CONTENT_ENCODING = "response-content-encoding" + PARAM_IMAGE_PROCESS = "x-image-process" + + PARAM_ALGORITHM_AMZ_CAMEL = "X-Amz-Algorithm" + PARAM_CREDENTIAL_AMZ_CAMEL = "X-Amz-Credential" + PARAM_DATE_AMZ_CAMEL = "X-Amz-Date" + PARAM_DATE_OBS_CAMEL = "X-Obs-Date" + PARAM_EXPIRES_AMZ_CAMEL = "X-Amz-Expires" + PARAM_SIGNEDHEADERS_AMZ_CAMEL = "X-Amz-SignedHeaders" + PARAM_SIGNATURE_AMZ_CAMEL = "X-Amz-Signature" + + DEFAULT_SIGNATURE = SignatureV2 + DEFAULT_REGION = "region" + DEFAULT_CONNECT_TIMEOUT = 60 + DEFAULT_SOCKET_TIMEOUT = 60 + DEFAULT_HEADER_TIMEOUT = 60 + DEFAULT_IDLE_CONN_TIMEOUT = 30 + DEFAULT_MAX_RETRY_COUNT = 3 + DEFAULT_MAX_REDIRECT_COUNT = 3 + DEFAULT_MAX_CONN_PER_HOST = 1000 + EMPTY_CONTENT_SHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + UNSIGNED_PAYLOAD = "UNSIGNED-PAYLOAD" + LONG_DATE_FORMAT = "20060102T150405Z" + SHORT_DATE_FORMAT = "20060102" + ISO8601_DATE_FORMAT = "2006-01-02T15:04:05Z" + ISO8601_MIDNIGHT_DATE_FORMAT = "2006-01-02T00:00:00Z" + RFC1123_FORMAT = "Mon, 02 Jan 2006 15:04:05 
GMT" + + V4_SERVICE_NAME = "s3" + V4_SERVICE_SUFFIX = "aws4_request" + + V2_HASH_PREFIX = "AWS" + OBS_HASH_PREFIX = "OBS" + + V4_HASH_PREFIX = "AWS4-HMAC-SHA256" + V4_HASH_PRE = "AWS4" + + DEFAULT_SSE_KMS_ENCRYPTION = "aws:kms" + DEFAULT_SSE_KMS_ENCRYPTION_OBS = "kms" + + DEFAULT_SSE_C_ENCRYPTION = "AES256" + + HTTP_GET = "GET" + HTTP_POST = "POST" + HTTP_PUT = "PUT" + HTTP_DELETE = "DELETE" + HTTP_HEAD = "HEAD" + HTTP_OPTIONS = "OPTIONS" + + REQUEST_PAYER = "request-payer" + MULTI_AZ = "3az" + + MAX_PART_SIZE = 5 * 1024 * 1024 * 1024 + MIN_PART_SIZE = 100 * 1024 + DEFAULT_PART_SIZE = 9 * 1024 * 1024 + MAX_PART_NUM = 10000 +) + +// SignatureType defines type of signature +type SignatureType string + +const ( + // SignatureV2 signature type v2 + SignatureV2 SignatureType = "v2" + // SignatureV4 signature type v4 + SignatureV4 SignatureType = "v4" + // SignatureObs signature type OBS + SignatureObs SignatureType = "OBS" +) + +var ( + interestedHeaders = []string{"content-md5", "content-type", "date"} + + allowedRequestHTTPHeaderMetadataNames = map[string]bool{ + "content-type": true, + "content-md5": true, + "content-length": true, + "content-language": true, + "expires": true, + "origin": true, + "cache-control": true, + "content-disposition": true, + "content-encoding": true, + "access-control-request-method": true, + "access-control-request-headers": true, + "x-default-storage-class": true, + "location": true, + "date": true, + "etag": true, + "range": true, + "host": true, + "if-modified-since": true, + "if-unmodified-since": true, + "if-match": true, + "if-none-match": true, + "last-modified": true, + "content-range": true, + } + + allowedResourceParameterNames = map[string]bool{ + "acl": true, + "backtosource": true, + "metadata": true, + "policy": true, + "torrent": true, + "logging": true, + "location": true, + "storageinfo": true, + "quota": true, + "storageclass": true, + "storagepolicy": true, + "requestpayment": true, + "versions": true, + "versioning": true, + "versionid": true, + "uploads": true, + "uploadid": true, + "partnumber": true, + "website": true, + "notification": true, + "lifecycle": true, + "deletebucket": true, + "delete": true, + "cors": true, + "restore": true, + "tagging": true, + "append": true, + "position": true, + "replication": true, + "response-content-type": true, + "response-content-language": true, + "response-expires": true, + "response-cache-control": true, + "response-content-disposition": true, + "response-content-encoding": true, + "x-image-process": true, + "x-oss-process": true, + "x-image-save-bucket": true, + "x-image-save-object": true, + "ignore-sign-in-query": true, + } + + mimeTypes = map[string]string{ + "001": "application/x-001", + "301": "application/x-301", + "323": "text/h323", + "7z": "application/x-7z-compressed", + "906": "application/x-906", + "907": "drawing/907", + "IVF": "video/x-ivf", + "a11": "application/x-a11", + "aac": "audio/x-aac", + "acp": "audio/x-mei-aac", + "ai": "application/postscript", + "aif": "audio/aiff", + "aifc": "audio/aiff", + "aiff": "audio/aiff", + "anv": "application/x-anv", + "apk": "application/vnd.android.package-archive", + "asa": "text/asa", + "asf": "video/x-ms-asf", + "asp": "text/asp", + "asx": "video/x-ms-asf", + "atom": "application/atom+xml", + "au": "audio/basic", + "avi": "video/avi", + "awf": "application/vnd.adobe.workflow", + "biz": "text/xml", + "bmp": "application/x-bmp", + "bot": "application/x-bot", + "bz2": "application/x-bzip2", + "c4t": "application/x-c4t", + "c90": 
"application/x-c90", + "cal": "application/x-cals", + "cat": "application/vnd.ms-pki.seccat", + "cdf": "application/x-netcdf", + "cdr": "application/x-cdr", + "cel": "application/x-cel", + "cer": "application/x-x509-ca-cert", + "cg4": "application/x-g4", + "cgm": "application/x-cgm", + "cit": "application/x-cit", + "class": "java/*", + "cml": "text/xml", + "cmp": "application/x-cmp", + "cmx": "application/x-cmx", + "cot": "application/x-cot", + "crl": "application/pkix-crl", + "crt": "application/x-x509-ca-cert", + "csi": "application/x-csi", + "css": "text/css", + "csv": "text/csv", + "cu": "application/cu-seeme", + "cut": "application/x-cut", + "dbf": "application/x-dbf", + "dbm": "application/x-dbm", + "dbx": "application/x-dbx", + "dcd": "text/xml", + "dcx": "application/x-dcx", + "deb": "application/x-debian-package", + "der": "application/x-x509-ca-cert", + "dgn": "application/x-dgn", + "dib": "application/x-dib", + "dll": "application/x-msdownload", + "doc": "application/msword", + "docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", + "dot": "application/msword", + "drw": "application/x-drw", + "dtd": "text/xml", + "dvi": "application/x-dvi", + "dwf": "application/x-dwf", + "dwg": "application/x-dwg", + "dxb": "application/x-dxb", + "dxf": "application/x-dxf", + "edn": "application/vnd.adobe.edn", + "emf": "application/x-emf", + "eml": "message/rfc822", + "ent": "text/xml", + "eot": "application/vnd.ms-fontobject", + "epi": "application/x-epi", + "eps": "application/postscript", + "epub": "application/epub+zip", + "etd": "application/x-ebx", + "etx": "text/x-setext", + "exe": "application/x-msdownload", + "fax": "image/fax", + "fdf": "application/vnd.fdf", + "fif": "application/fractals", + "flac": "audio/flac", + "flv": "video/x-flv", + "fo": "text/xml", + "frm": "application/x-frm", + "g4": "application/x-g4", + "gbr": "application/x-gbr", + "gif": "image/gif", + "gl2": "application/x-gl2", + "gp4": "application/x-gp4", + "gz": "application/gzip", + "hgl": "application/x-hgl", + "hmr": "application/x-hmr", + "hpg": "application/x-hpgl", + "hpl": "application/x-hpl", + "hqx": "application/mac-binhex40", + "hrf": "application/x-hrf", + "hta": "application/hta", + "htc": "text/x-component", + "htm": "text/html", + "html": "text/html", + "htt": "text/webviewhtml", + "htx": "text/html", + "icb": "application/x-icb", + "ico": "application/x-ico", + "ics": "text/calendar", + "iff": "application/x-iff", + "ig4": "application/x-g4", + "igs": "application/x-igs", + "iii": "application/x-iphone", + "img": "application/x-img", + "ini": "text/plain", + "ins": "application/x-internet-signup", + "ipa": "application/vnd.iphone", + "iso": "application/x-iso9660-image", + "isp": "application/x-internet-signup", + "jar": "application/java-archive", + "java": "java/*", + "jfif": "image/jpeg", + "jpe": "image/jpeg", + "jpeg": "image/jpeg", + "jpg": "image/jpeg", + "js": "application/x-javascript", + "json": "application/json", + "jsp": "text/html", + "la1": "audio/x-liquid-file", + "lar": "application/x-laplayer-reg", + "latex": "application/x-latex", + "lavs": "audio/x-liquid-secure", + "lbm": "application/x-lbm", + "lmsff": "audio/x-la-lms", + "log": "text/plain", + "ls": "application/x-javascript", + "ltr": "application/x-ltr", + "m1v": "video/x-mpeg", + "m2v": "video/x-mpeg", + "m3u": "audio/mpegurl", + "m4a": "audio/mp4", + "m4e": "video/mpeg4", + "m4v": "video/mp4", + "mac": "application/x-mac", + "man": "application/x-troff-man", + "math": "text/xml", + "mdb": 
"application/msaccess", + "mfp": "application/x-shockwave-flash", + "mht": "message/rfc822", + "mhtml": "message/rfc822", + "mi": "application/x-mi", + "mid": "audio/mid", + "midi": "audio/mid", + "mil": "application/x-mil", + "mml": "text/xml", + "mnd": "audio/x-musicnet-download", + "mns": "audio/x-musicnet-stream", + "mocha": "application/x-javascript", + "mov": "video/quicktime", + "movie": "video/x-sgi-movie", + "mp1": "audio/mp1", + "mp2": "audio/mp2", + "mp2v": "video/mpeg", + "mp3": "audio/mp3", + "mp4": "video/mp4", + "mp4a": "audio/mp4", + "mp4v": "video/mp4", + "mpa": "video/x-mpg", + "mpd": "application/vnd.ms-project", + "mpe": "video/mpeg", + "mpeg": "video/mpeg", + "mpg": "video/mpeg", + "mpg4": "video/mp4", + "mpga": "audio/rn-mpeg", + "mpp": "application/vnd.ms-project", + "mps": "video/x-mpeg", + "mpt": "application/vnd.ms-project", + "mpv": "video/mpg", + "mpv2": "video/mpeg", + "mpw": "application/vnd.ms-project", + "mpx": "application/vnd.ms-project", + "mtx": "text/xml", + "mxp": "application/x-mmxp", + "net": "image/pnetvue", + "nrf": "application/x-nrf", + "nws": "message/rfc822", + "odc": "text/x-ms-odc", + "oga": "audio/ogg", + "ogg": "audio/ogg", + "ogv": "video/ogg", + "ogx": "application/ogg", + "out": "application/x-out", + "p10": "application/pkcs10", + "p12": "application/x-pkcs12", + "p7b": "application/x-pkcs7-certificates", + "p7c": "application/pkcs7-mime", + "p7m": "application/pkcs7-mime", + "p7r": "application/x-pkcs7-certreqresp", + "p7s": "application/pkcs7-signature", + "pbm": "image/x-portable-bitmap", + "pc5": "application/x-pc5", + "pci": "application/x-pci", + "pcl": "application/x-pcl", + "pcx": "application/x-pcx", + "pdf": "application/pdf", + "pdx": "application/vnd.adobe.pdx", + "pfx": "application/x-pkcs12", + "pgl": "application/x-pgl", + "pgm": "image/x-portable-graymap", + "pic": "application/x-pic", + "pko": "application/vnd.ms-pki.pko", + "pl": "application/x-perl", + "plg": "text/html", + "pls": "audio/scpls", + "plt": "application/x-plt", + "png": "image/png", + "pnm": "image/x-portable-anymap", + "pot": "application/vnd.ms-powerpoint", + "ppa": "application/vnd.ms-powerpoint", + "ppm": "application/x-ppm", + "pps": "application/vnd.ms-powerpoint", + "ppt": "application/vnd.ms-powerpoint", + "pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation", + "pr": "application/x-pr", + "prf": "application/pics-rules", + "prn": "application/x-prn", + "prt": "application/x-prt", + "ps": "application/postscript", + "ptn": "application/x-ptn", + "pwz": "application/vnd.ms-powerpoint", + "qt": "video/quicktime", + "r3t": "text/vnd.rn-realtext3d", + "ra": "audio/vnd.rn-realaudio", + "ram": "audio/x-pn-realaudio", + "rar": "application/x-rar-compressed", + "ras": "application/x-ras", + "rat": "application/rat-file", + "rdf": "text/xml", + "rec": "application/vnd.rn-recording", + "red": "application/x-red", + "rgb": "application/x-rgb", + "rjs": "application/vnd.rn-realsystem-rjs", + "rjt": "application/vnd.rn-realsystem-rjt", + "rlc": "application/x-rlc", + "rle": "application/x-rle", + "rm": "application/vnd.rn-realmedia", + "rmf": "application/vnd.adobe.rmf", + "rmi": "audio/mid", + "rmj": "application/vnd.rn-realsystem-rmj", + "rmm": "audio/x-pn-realaudio", + "rmp": "application/vnd.rn-rn_music_package", + "rms": "application/vnd.rn-realmedia-secure", + "rmvb": "application/vnd.rn-realmedia-vbr", + "rmx": "application/vnd.rn-realsystem-rmx", + "rnx": "application/vnd.rn-realplayer", + "rp": "image/vnd.rn-realpix", + 
"rpm": "audio/x-pn-realaudio-plugin", + "rsml": "application/vnd.rn-rsml", + "rss": "application/rss+xml", + "rt": "text/vnd.rn-realtext", + "rtf": "application/x-rtf", + "rv": "video/vnd.rn-realvideo", + "sam": "application/x-sam", + "sat": "application/x-sat", + "sdp": "application/sdp", + "sdw": "application/x-sdw", + "sgm": "text/sgml", + "sgml": "text/sgml", + "sis": "application/vnd.symbian.install", + "sisx": "application/vnd.symbian.install", + "sit": "application/x-stuffit", + "slb": "application/x-slb", + "sld": "application/x-sld", + "slk": "drawing/x-slk", + "smi": "application/smil", + "smil": "application/smil", + "smk": "application/x-smk", + "snd": "audio/basic", + "sol": "text/plain", + "sor": "text/plain", + "spc": "application/x-pkcs7-certificates", + "spl": "application/futuresplash", + "spp": "text/xml", + "ssm": "application/streamingmedia", + "sst": "application/vnd.ms-pki.certstore", + "stl": "application/vnd.ms-pki.stl", + "stm": "text/html", + "sty": "application/x-sty", + "svg": "image/svg+xml", + "swf": "application/x-shockwave-flash", + "tar": "application/x-tar", + "tdf": "application/x-tdf", + "tg4": "application/x-tg4", + "tga": "application/x-tga", + "tif": "image/tiff", + "tiff": "image/tiff", + "tld": "text/xml", + "top": "drawing/x-top", + "torrent": "application/x-bittorrent", + "tsd": "text/xml", + "ttf": "application/x-font-ttf", + "txt": "text/plain", + "uin": "application/x-icq", + "uls": "text/iuls", + "vcf": "text/x-vcard", + "vda": "application/x-vda", + "vdx": "application/vnd.visio", + "vml": "text/xml", + "vpg": "application/x-vpeg005", + "vsd": "application/vnd.visio", + "vss": "application/vnd.visio", + "vst": "application/x-vst", + "vsw": "application/vnd.visio", + "vsx": "application/vnd.visio", + "vtx": "application/vnd.visio", + "vxml": "text/xml", + "wav": "audio/wav", + "wax": "audio/x-ms-wax", + "wb1": "application/x-wb1", + "wb2": "application/x-wb2", + "wb3": "application/x-wb3", + "wbmp": "image/vnd.wap.wbmp", + "webm": "video/webm", + "wiz": "application/msword", + "wk3": "application/x-wk3", + "wk4": "application/x-wk4", + "wkq": "application/x-wkq", + "wks": "application/x-wks", + "wm": "video/x-ms-wm", + "wma": "audio/x-ms-wma", + "wmd": "application/x-ms-wmd", + "wmf": "application/x-wmf", + "wml": "text/vnd.wap.wml", + "wmv": "video/x-ms-wmv", + "wmx": "video/x-ms-wmx", + "wmz": "application/x-ms-wmz", + "woff": "application/x-font-woff", + "wp6": "application/x-wp6", + "wpd": "application/x-wpd", + "wpg": "application/x-wpg", + "wpl": "application/vnd.ms-wpl", + "wq1": "application/x-wq1", + "wr1": "application/x-wr1", + "wri": "application/x-wri", + "wrk": "application/x-wrk", + "ws": "application/x-ws", + "ws2": "application/x-ws", + "wsc": "text/scriptlet", + "wsdl": "text/xml", + "wvx": "video/x-ms-wvx", + "x_b": "application/x-x_b", + "x_t": "application/x-x_t", + "xap": "application/x-silverlight-app", + "xbm": "image/x-xbitmap", + "xdp": "application/vnd.adobe.xdp", + "xdr": "text/xml", + "xfd": "application/vnd.adobe.xfd", + "xfdf": "application/vnd.adobe.xfdf", + "xhtml": "text/html", + "xls": "application/vnd.ms-excel", + "xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", + "xlw": "application/x-xlw", + "xml": "text/xml", + "xpl": "audio/scpls", + "xpm": "image/x-xpixmap", + "xq": "text/xml", + "xql": "text/xml", + "xquery": "text/xml", + "xsd": "text/xml", + "xsl": "text/xml", + "xslt": "text/xml", + "xwd": "application/x-xwd", + "yaml": "text/yaml", + "yml": "text/yaml", + "zip": 
"application/zip", + } +) + +// HttpMethodType defines http method type +type HttpMethodType string + +const ( + HttpMethodGet HttpMethodType = HTTP_GET + HttpMethodPut HttpMethodType = HTTP_PUT + HttpMethodPost HttpMethodType = HTTP_POST + HttpMethodDelete HttpMethodType = HTTP_DELETE + HttpMethodHead HttpMethodType = HTTP_HEAD + HttpMethodOptions HttpMethodType = HTTP_OPTIONS +) + +// SubResourceType defines the subResource value +type SubResourceType string + +const ( + // SubResourceStoragePolicy subResource value: storagePolicy + SubResourceStoragePolicy SubResourceType = "storagePolicy" + + // SubResourceStorageClass subResource value: storageClass + SubResourceStorageClass SubResourceType = "storageClass" + + // SubResourceQuota subResource value: quota + SubResourceQuota SubResourceType = "quota" + + // SubResourceStorageInfo subResource value: storageinfo + SubResourceStorageInfo SubResourceType = "storageinfo" + + // SubResourceLocation subResource value: location + SubResourceLocation SubResourceType = "location" + + // SubResourceAcl subResource value: acl + SubResourceAcl SubResourceType = "acl" + + // SubResourcePolicy subResource value: policy + SubResourcePolicy SubResourceType = "policy" + + // SubResourceCors subResource value: cors + SubResourceCors SubResourceType = "cors" + + // SubResourceVersioning subResource value: versioning + SubResourceVersioning SubResourceType = "versioning" + + // SubResourceWebsite subResource value: website + SubResourceWebsite SubResourceType = "website" + + // SubResourceLogging subResource value: logging + SubResourceLogging SubResourceType = "logging" + + // SubResourceLifecycle subResource value: lifecycle + SubResourceLifecycle SubResourceType = "lifecycle" + + // SubResourceNotification subResource value: notification + SubResourceNotification SubResourceType = "notification" + + // SubResourceTagging subResource value: tagging + SubResourceTagging SubResourceType = "tagging" + + // SubResourceDelete subResource value: delete + SubResourceDelete SubResourceType = "delete" + + // SubResourceVersions subResource value: versions + SubResourceVersions SubResourceType = "versions" + + // SubResourceUploads subResource value: uploads + SubResourceUploads SubResourceType = "uploads" + + // SubResourceRestore subResource value: restore + SubResourceRestore SubResourceType = "restore" + + // SubResourceMetadata subResource value: metadata + SubResourceMetadata SubResourceType = "metadata" + + // SubResourceRequestPayment subResource value: requestPayment + SubResourceRequestPayment SubResourceType = "requestPayment" +) + +// objectKeyType defines the objectKey value +type objectKeyType string + +const ( + // objectKeyExtensionPolicy objectKey value: v1/extension_policy + objectKeyExtensionPolicy objectKeyType = "v1/extension_policy" + + // objectKeyAsyncFetchJob objectKey value: v1/async-fetch/jobs + objectKeyAsyncFetchJob objectKeyType = "v1/async-fetch/jobs" +) + +// AclType defines bucket/object acl type +type AclType string + +const ( + AclPrivate AclType = "private" + AclPublicRead AclType = "public-read" + AclPublicReadWrite AclType = "public-read-write" + AclAuthenticatedRead AclType = "authenticated-read" + AclBucketOwnerRead AclType = "bucket-owner-read" + AclBucketOwnerFullControl AclType = "bucket-owner-full-control" + AclLogDeliveryWrite AclType = "log-delivery-write" + AclPublicReadDelivery AclType = "public-read-delivered" + AclPublicReadWriteDelivery AclType = "public-read-write-delivered" +) + +// StorageClassType defines 
bucket storage class
+type StorageClassType string
+
+const (
+	//StorageClassStandard storage class: STANDARD
+	StorageClassStandard StorageClassType = "STANDARD"
+
+	//StorageClassWarm storage class: WARM
+	StorageClassWarm StorageClassType = "WARM"
+
+	//StorageClassCold storage class: COLD
+	StorageClassCold StorageClassType = "COLD"
+
+	storageClassStandardIA StorageClassType = "STANDARD_IA"
+	storageClassGlacier    StorageClassType = "GLACIER"
+)
+
+// PermissionType defines permission type
+type PermissionType string
+
+const (
+	// PermissionRead permission type: READ
+	PermissionRead PermissionType = "READ"
+
+	// PermissionWrite permission type: WRITE
+	PermissionWrite PermissionType = "WRITE"
+
+	// PermissionReadAcp permission type: READ_ACP
+	PermissionReadAcp PermissionType = "READ_ACP"
+
+	// PermissionWriteAcp permission type: WRITE_ACP
+	PermissionWriteAcp PermissionType = "WRITE_ACP"
+
+	// PermissionFullControl permission type: FULL_CONTROL
+	PermissionFullControl PermissionType = "FULL_CONTROL"
+)
+
+// GranteeType defines grantee type
+type GranteeType string
+
+const (
+	// GranteeGroup grantee type: Group
+	GranteeGroup GranteeType = "Group"
+
+	// GranteeUser grantee type: CanonicalUser
+	GranteeUser GranteeType = "CanonicalUser"
+)
+
+// GroupUriType defines grantee uri type
+type GroupUriType string
+
+const (
+	// GroupAllUsers grantee uri type: AllUsers
+	GroupAllUsers GroupUriType = "AllUsers"
+
+	// GroupAuthenticatedUsers grantee uri type: AuthenticatedUsers
+	GroupAuthenticatedUsers GroupUriType = "AuthenticatedUsers"
+
+	// GroupLogDelivery grantee uri type: LogDelivery
+	GroupLogDelivery GroupUriType = "LogDelivery"
+)
+
+// VersioningStatusType defines bucket version status
+type VersioningStatusType string
+
+const (
+	// VersioningStatusEnabled version status: Enabled
+	VersioningStatusEnabled VersioningStatusType = "Enabled"
+
+	// VersioningStatusSuspended version status: Suspended
+	VersioningStatusSuspended VersioningStatusType = "Suspended"
+)
+
+// ProtocolType defines protocol type
+type ProtocolType string
+
+const (
+	// ProtocolHttp protocol type: http
+	ProtocolHttp ProtocolType = "http"
+
+	// ProtocolHttps protocol type: https
+	ProtocolHttps ProtocolType = "https"
+)
+
+// RuleStatusType defines lifecycle rule status
+type RuleStatusType string
+
+const (
+	// RuleStatusEnabled rule status: Enabled
+	RuleStatusEnabled RuleStatusType = "Enabled"
+
+	// RuleStatusDisabled rule status: Disabled
+	RuleStatusDisabled RuleStatusType = "Disabled"
+)
+
+// RestoreTierType defines restore options
+type RestoreTierType string
+
+const (
+	// RestoreTierExpedited restore options: Expedited
+	RestoreTierExpedited RestoreTierType = "Expedited"
+
+	// RestoreTierStandard restore options: Standard
+	RestoreTierStandard RestoreTierType = "Standard"
+
+	// RestoreTierBulk restore options: Bulk
+	RestoreTierBulk RestoreTierType = "Bulk"
+)
+
+// MetadataDirectiveType defines metadata operation indicator
+type MetadataDirectiveType string
+
+const (
+	// CopyMetadata metadata operation: COPY
+	CopyMetadata MetadataDirectiveType = "COPY"
+
+	// ReplaceNew metadata operation: REPLACE_NEW
+	ReplaceNew MetadataDirectiveType = "REPLACE_NEW"
+
+	// ReplaceMetadata metadata operation: REPLACE
+	ReplaceMetadata MetadataDirectiveType = "REPLACE"
+)
+
+// EventType defines bucket notification type of events
+type EventType string
+
+const (
+	// ObjectCreatedAll type of events: ObjectCreated:*
+	ObjectCreatedAll EventType = "ObjectCreated:*"
+
+	// ObjectCreatedPut type
of events: ObjectCreated:Put + ObjectCreatedPut EventType = "ObjectCreated:Put" + + // ObjectCreatedPost type of events: ObjectCreated:Post + ObjectCreatedPost EventType = "ObjectCreated:Post" + + // ObjectCreatedCopy type of events: ObjectCreated:Copy + ObjectCreatedCopy EventType = "ObjectCreated:Copy" + + // ObjectCreatedCompleteMultipartUpload type of events: ObjectCreated:CompleteMultipartUpload + ObjectCreatedCompleteMultipartUpload EventType = "ObjectCreated:CompleteMultipartUpload" + + // ObjectRemovedAll type of events: ObjectRemoved:* + ObjectRemovedAll EventType = "ObjectRemoved:*" + + // ObjectRemovedDelete type of events: ObjectRemoved:Delete + ObjectRemovedDelete EventType = "ObjectRemoved:Delete" + + // ObjectRemovedDeleteMarkerCreated type of events: ObjectRemoved:DeleteMarkerCreated + ObjectRemovedDeleteMarkerCreated EventType = "ObjectRemoved:DeleteMarkerCreated" +) + +// PayerType defines type of payer +type PayerType string + +const ( + // BucketOwnerPayer type of payer: BucketOwner + BucketOwnerPayer PayerType = "BucketOwner" + + // RequesterPayer type of payer: Requester + RequesterPayer PayerType = "Requester" + + // Requester header for requester-Pays + Requester PayerType = "requester" +) + +// FetchPolicyStatusType defines type of fetch policy status +type FetchPolicyStatusType string + +const ( + // FetchStatusOpen type of status: open + FetchStatusOpen FetchPolicyStatusType = "open" + + // FetchStatusClosed type of status: closed + FetchStatusClosed FetchPolicyStatusType = "closed" +) diff --git a/modules/obs/convert.go b/modules/obs/convert.go new file mode 100755 index 000000000..bd859556b --- /dev/null +++ b/modules/obs/convert.go @@ -0,0 +1,880 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +//nolint:golint, unused +package obs + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strings" + "time" +) + +func cleanHeaderPrefix(header http.Header) map[string][]string { + responseHeaders := make(map[string][]string) + for key, value := range header { + if len(value) > 0 { + key = strings.ToLower(key) + if strings.HasPrefix(key, HEADER_PREFIX) || strings.HasPrefix(key, HEADER_PREFIX_OBS) { + key = key[len(HEADER_PREFIX):] + } + responseHeaders[key] = value + } + } + return responseHeaders +} + +// ParseStringToEventType converts string value to EventType value and returns it +func ParseStringToEventType(value string) (ret EventType) { + switch value { + case "ObjectCreated:*", "s3:ObjectCreated:*": + ret = ObjectCreatedAll + case "ObjectCreated:Put", "s3:ObjectCreated:Put": + ret = ObjectCreatedPut + case "ObjectCreated:Post", "s3:ObjectCreated:Post": + ret = ObjectCreatedPost + case "ObjectCreated:Copy", "s3:ObjectCreated:Copy": + ret = ObjectCreatedCopy + case "ObjectCreated:CompleteMultipartUpload", "s3:ObjectCreated:CompleteMultipartUpload": + ret = ObjectCreatedCompleteMultipartUpload + case "ObjectRemoved:*", "s3:ObjectRemoved:*": + ret = ObjectRemovedAll + case "ObjectRemoved:Delete", "s3:ObjectRemoved:Delete": + ret = ObjectRemovedDelete + case "ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRemoved:DeleteMarkerCreated": + ret = ObjectRemovedDeleteMarkerCreated + default: + ret = "" + } + return +} + +// ParseStringToStorageClassType converts string value to StorageClassType value and returns it +func ParseStringToStorageClassType(value string) (ret StorageClassType) { + switch value { + case "STANDARD": + ret = StorageClassStandard + case "STANDARD_IA", "WARM": + ret = StorageClassWarm + case "GLACIER", "COLD": + ret = StorageClassCold + default: + ret = "" + } + return +} + +func prepareGrantURI(grant Grant) string { + if grant.Grantee.URI == GroupAllUsers || grant.Grantee.URI == GroupAuthenticatedUsers { + return fmt.Sprintf("%s%s", "http://acs.amazonaws.com/groups/global/", grant.Grantee.URI) + } + if grant.Grantee.URI == GroupLogDelivery { + return fmt.Sprintf("%s%s", "http://acs.amazonaws.com/groups/s3/", grant.Grantee.URI) + } + return fmt.Sprintf("%s", grant.Grantee.URI) +} + +func convertGrantToXML(grant Grant, isObs bool, isBucket bool) string { + xml := make([]string, 0, 4) + + if grant.Grantee.Type == GranteeUser { + if isObs { + xml = append(xml, "") + } else { + xml = append(xml, fmt.Sprintf("", grant.Grantee.Type)) + } + if grant.Grantee.ID != "" { + granteeID := XmlTranscoding(grant.Grantee.ID) + xml = append(xml, fmt.Sprintf("%s", granteeID)) + } + if !isObs && grant.Grantee.DisplayName != "" { + granteeDisplayName := XmlTranscoding(grant.Grantee.DisplayName) + xml = append(xml, fmt.Sprintf("%s", granteeDisplayName)) + } + xml = append(xml, "") + } else { + if !isObs { + xml = append(xml, fmt.Sprintf("", grant.Grantee.Type)) + xml = append(xml, prepareGrantURI(grant)) + xml = append(xml, "") + } else if grant.Grantee.URI == GroupAllUsers { + xml = append(xml, "") + xml = append(xml, fmt.Sprintf("Everyone")) + xml = append(xml, "") + } else { + return strings.Join(xml, "") + } + } + + xml = append(xml, fmt.Sprintf("%s", grant.Permission)) + if isObs && isBucket { + xml = append(xml, fmt.Sprintf("%t", grant.Delivered)) + } + xml = append(xml, fmt.Sprintf("")) + return strings.Join(xml, "") +} + +func hasLoggingTarget(input BucketLoggingStatus) bool { + if input.TargetBucket != "" || input.TargetPrefix != "" || 
len(input.TargetGrants) > 0 { + return true + } + return false +} + +// ConvertLoggingStatusToXml converts BucketLoggingStatus value to XML data and returns it +func ConvertLoggingStatusToXml(input BucketLoggingStatus, returnMd5 bool, isObs bool) (data string, md5 string) { + grantsLength := len(input.TargetGrants) + xml := make([]string, 0, 8+grantsLength) + + xml = append(xml, "") + if isObs && input.Agency != "" { + agency := XmlTranscoding(input.Agency) + xml = append(xml, fmt.Sprintf("%s", agency)) + } + if hasLoggingTarget(input) { + xml = append(xml, "") + if input.TargetBucket != "" { + xml = append(xml, fmt.Sprintf("%s", input.TargetBucket)) + } + if input.TargetPrefix != "" { + targetPrefix := XmlTranscoding(input.TargetPrefix) + xml = append(xml, fmt.Sprintf("%s", targetPrefix)) + } + if grantsLength > 0 { + xml = append(xml, "") + for _, grant := range input.TargetGrants { + xml = append(xml, convertGrantToXML(grant, isObs, false)) + } + xml = append(xml, "") + } + + xml = append(xml, "") + } + xml = append(xml, "") + data = strings.Join(xml, "") + if returnMd5 { + md5 = Base64Md5([]byte(data)) + } + return +} + +// ConvertAclToXml converts AccessControlPolicy value to XML data and returns it +func ConvertAclToXml(input AccessControlPolicy, returnMd5 bool, isObs bool) (data string, md5 string) { + xml := make([]string, 0, 4+len(input.Grants)) + ownerID := XmlTranscoding(input.Owner.ID) + xml = append(xml, fmt.Sprintf("%s", ownerID)) + if !isObs && input.Owner.DisplayName != "" { + ownerDisplayName := XmlTranscoding(input.Owner.DisplayName) + xml = append(xml, fmt.Sprintf("%s", ownerDisplayName)) + } + if isObs && input.Delivered != "" { + objectDelivered := XmlTranscoding(input.Delivered) + xml = append(xml, fmt.Sprintf("%s", objectDelivered)) + } else { + xml = append(xml, "") + } + for _, grant := range input.Grants { + xml = append(xml, convertGrantToXML(grant, isObs, false)) + } + xml = append(xml, "") + data = strings.Join(xml, "") + if returnMd5 { + md5 = Base64Md5([]byte(data)) + } + return +} + +func convertBucketACLToXML(input AccessControlPolicy, returnMd5 bool, isObs bool) (data string, md5 string) { + xml := make([]string, 0, 4+len(input.Grants)) + ownerID := XmlTranscoding(input.Owner.ID) + xml = append(xml, fmt.Sprintf("%s", ownerID)) + if !isObs && input.Owner.DisplayName != "" { + ownerDisplayName := XmlTranscoding(input.Owner.DisplayName) + xml = append(xml, fmt.Sprintf("%s", ownerDisplayName)) + } + + xml = append(xml, "") + + for _, grant := range input.Grants { + xml = append(xml, convertGrantToXML(grant, isObs, true)) + } + xml = append(xml, "") + data = strings.Join(xml, "") + if returnMd5 { + md5 = Base64Md5([]byte(data)) + } + return +} + +func convertConditionToXML(condition Condition) string { + xml := make([]string, 0, 2) + if condition.KeyPrefixEquals != "" { + keyPrefixEquals := XmlTranscoding(condition.KeyPrefixEquals) + xml = append(xml, fmt.Sprintf("%s", keyPrefixEquals)) + } + if condition.HttpErrorCodeReturnedEquals != "" { + xml = append(xml, fmt.Sprintf("%s", condition.HttpErrorCodeReturnedEquals)) + } + if len(xml) > 0 { + return fmt.Sprintf("%s", strings.Join(xml, "")) + } + return "" +} + +func prepareRoutingRule(input BucketWebsiteConfiguration) string { + xml := make([]string, 0, len(input.RoutingRules)*10) + for _, routingRule := range input.RoutingRules { + xml = append(xml, "") + xml = append(xml, "") + if routingRule.Redirect.Protocol != "" { + xml = append(xml, fmt.Sprintf("%s", routingRule.Redirect.Protocol)) + } + if 
+
+func prepareRoutingRule(input BucketWebsiteConfiguration) string {
+	xml := make([]string, 0, len(input.RoutingRules)*10)
+	for _, routingRule := range input.RoutingRules {
+		xml = append(xml, "<RoutingRule>")
+		xml = append(xml, "<Redirect>")
+		if routingRule.Redirect.Protocol != "" {
+			xml = append(xml, fmt.Sprintf("<Protocol>%s</Protocol>", routingRule.Redirect.Protocol))
+		}
+		if routingRule.Redirect.HostName != "" {
+			xml = append(xml, fmt.Sprintf("<HostName>%s</HostName>", routingRule.Redirect.HostName))
+		}
+		if routingRule.Redirect.ReplaceKeyPrefixWith != "" {
+			replaceKeyPrefixWith := XmlTranscoding(routingRule.Redirect.ReplaceKeyPrefixWith)
+			xml = append(xml, fmt.Sprintf("<ReplaceKeyPrefixWith>%s</ReplaceKeyPrefixWith>", replaceKeyPrefixWith))
+		}
+
+		if routingRule.Redirect.ReplaceKeyWith != "" {
+			replaceKeyWith := XmlTranscoding(routingRule.Redirect.ReplaceKeyWith)
+			xml = append(xml, fmt.Sprintf("<ReplaceKeyWith>%s</ReplaceKeyWith>", replaceKeyWith))
+		}
+		if routingRule.Redirect.HttpRedirectCode != "" {
+			xml = append(xml, fmt.Sprintf("<HttpRedirectCode>%s</HttpRedirectCode>", routingRule.Redirect.HttpRedirectCode))
+		}
+		xml = append(xml, "</Redirect>")
+
+		if ret := convertConditionToXML(routingRule.Condition); ret != "" {
+			xml = append(xml, ret)
+		}
+		xml = append(xml, "</RoutingRule>")
+	}
+	return strings.Join(xml, "")
+}
+
+// ConvertWebsiteConfigurationToXml converts BucketWebsiteConfiguration value to XML data and returns it
+func ConvertWebsiteConfigurationToXml(input BucketWebsiteConfiguration, returnMd5 bool) (data string, md5 string) {
+	routingRuleLength := len(input.RoutingRules)
+	xml := make([]string, 0, 6+routingRuleLength*10)
+	xml = append(xml, "<WebsiteConfiguration>")
+
+	if input.RedirectAllRequestsTo.HostName != "" {
+		xml = append(xml, fmt.Sprintf("<RedirectAllRequestsTo><HostName>%s</HostName>", input.RedirectAllRequestsTo.HostName))
+		if input.RedirectAllRequestsTo.Protocol != "" {
+			xml = append(xml, fmt.Sprintf("<Protocol>%s</Protocol>", input.RedirectAllRequestsTo.Protocol))
+		}
+		xml = append(xml, "</RedirectAllRequestsTo>")
+	} else {
+		if input.IndexDocument.Suffix != "" {
+			indexDocumentSuffix := XmlTranscoding(input.IndexDocument.Suffix)
+			xml = append(xml, fmt.Sprintf("<IndexDocument><Suffix>%s</Suffix></IndexDocument>", indexDocumentSuffix))
+		}
+		if input.ErrorDocument.Key != "" {
+			errorDocumentKey := XmlTranscoding(input.ErrorDocument.Key)
+			xml = append(xml, fmt.Sprintf("<ErrorDocument><Key>%s</Key></ErrorDocument>", errorDocumentKey))
+		}
+		if routingRuleLength > 0 {
+			xml = append(xml, "<RoutingRules>")
+			xml = append(xml, prepareRoutingRule(input))
+			xml = append(xml, "</RoutingRules>")
+		}
+	}
+
+	xml = append(xml, "</WebsiteConfiguration>")
+	data = strings.Join(xml, "")
+	if returnMd5 {
+		md5 = Base64Md5([]byte(data))
+	}
+	return
+}
+
+func convertTransitionsToXML(transitions []Transition, isObs bool) string {
+	if length := len(transitions); length > 0 {
+		xml := make([]string, 0, length)
+		for _, transition := range transitions {
+			var temp string
+			if transition.Days > 0 {
+				temp = fmt.Sprintf("<Days>%d</Days>", transition.Days)
+			} else if !transition.Date.IsZero() {
+				temp = fmt.Sprintf("<Date>%s</Date>", transition.Date.UTC().Format(ISO8601_MIDNIGHT_DATE_FORMAT))
+			}
+			if temp != "" {
+				if !isObs {
+					storageClass := string(transition.StorageClass)
+					if transition.StorageClass == StorageClassWarm {
+						storageClass = string(storageClassStandardIA)
+					} else if transition.StorageClass == StorageClassCold {
+						storageClass = string(storageClassGlacier)
+					}
+					xml = append(xml, fmt.Sprintf("<Transition>%s<StorageClass>%s</StorageClass></Transition>", temp, storageClass))
+				} else {
+					xml = append(xml, fmt.Sprintf("<Transition>%s<StorageClass>%s</StorageClass></Transition>", temp, transition.StorageClass))
+				}
+			}
+		}
+		return strings.Join(xml, "")
+	}
+	return ""
+}
+
+func convertExpirationToXML(expiration Expiration) string {
+	if expiration.Days > 0 {
+		return fmt.Sprintf("<Expiration><Days>%d</Days></Expiration>", expiration.Days)
+	} else if !expiration.Date.IsZero() {
+		return fmt.Sprintf("<Expiration><Date>%s</Date></Expiration>", expiration.Date.UTC().Format(ISO8601_MIDNIGHT_DATE_FORMAT))
+	}
+	return ""
+}
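convertTransitionsToXML and convertExpirationToXML give Days precedence over Date, and format dates with ISO8601_MIDNIGHT_DATE_FORMAT. The layout value is assumed here to pin the time portion to a literal midnight, since lifecycle boundaries are whole days; in a Go layout string, "00:00:00Z" contains no reference-time components and is therefore emitted verbatim. A runnable sketch of that choice:

    package main

    import (
    	"fmt"
    	"time"
    )

    // Assumed value of the SDK's ISO8601_MIDNIGHT_DATE_FORMAT constant:
    // the date fields are layout components, the time suffix is literal.
    const iso8601Midnight = "2006-01-02T00:00:00Z"

    // transitionTimeXML mirrors the Days-vs-Date choice in convertTransitionsToXML.
    func transitionTimeXML(days int, date time.Time) string {
    	if days > 0 {
    		return fmt.Sprintf("<Days>%d</Days>", days)
    	} else if !date.IsZero() {
    		return fmt.Sprintf("<Date>%s</Date>", date.UTC().Format(iso8601Midnight))
    	}
    	return "" // neither set: the fragment is skipped entirely
    }

    func main() {
    	fmt.Println(transitionTimeXML(30, time.Time{}))
    	// <Days>30</Days>
    	fmt.Println(transitionTimeXML(0, time.Date(2021, 6, 1, 15, 4, 5, 0, time.UTC)))
    	// <Date>2021-06-01T00:00:00Z</Date>  (time of day discarded by the layout)
    }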
+
+func convertNoncurrentVersionTransitionsToXML(noncurrentVersionTransitions []NoncurrentVersionTransition, isObs bool) string {
+	if length := len(noncurrentVersionTransitions); length > 0 {
+		xml := make([]string, 0, length)
+		for _, noncurrentVersionTransition := range noncurrentVersionTransitions {
+			if noncurrentVersionTransition.NoncurrentDays > 0 {
+				storageClass := string(noncurrentVersionTransition.StorageClass)
+				if !isObs {
+					if storageClass == string(StorageClassWarm) {
+						storageClass = string(storageClassStandardIA)
+					} else if storageClass == string(StorageClassCold) {
+						storageClass = string(storageClassGlacier)
+					}
+				}
+				xml = append(xml, fmt.Sprintf("<NoncurrentVersionTransition><NoncurrentDays>%d</NoncurrentDays>"+
+					"<StorageClass>%s</StorageClass></NoncurrentVersionTransition>",
+					noncurrentVersionTransition.NoncurrentDays, storageClass))
+			}
+		}
+		return strings.Join(xml, "")
+	}
+	return ""
+}
+func convertNoncurrentVersionExpirationToXML(noncurrentVersionExpiration NoncurrentVersionExpiration) string {
+	if noncurrentVersionExpiration.NoncurrentDays > 0 {
+		return fmt.Sprintf("<NoncurrentVersionExpiration><NoncurrentDays>%d</NoncurrentDays></NoncurrentVersionExpiration>", noncurrentVersionExpiration.NoncurrentDays)
+	}
+	return ""
+}
+
+// ConvertLifecyleConfigurationToXml converts BucketLifecyleConfiguration value to XML data and returns it
+func ConvertLifecyleConfigurationToXml(input BucketLifecyleConfiguration, returnMd5 bool, isObs bool) (data string, md5 string) {
+	xml := make([]string, 0, 2+len(input.LifecycleRules)*9)
+	xml = append(xml, "<LifecycleConfiguration>")
+	for _, lifecyleRule := range input.LifecycleRules {
+		xml = append(xml, "<Rule>")
+		if lifecyleRule.ID != "" {
+			lifecyleRuleID := XmlTranscoding(lifecyleRule.ID)
+			xml = append(xml, fmt.Sprintf("<ID>%s</ID>", lifecyleRuleID))
+		}
+		lifecyleRulePrefix := XmlTranscoding(lifecyleRule.Prefix)
+		xml = append(xml, fmt.Sprintf("<Prefix>%s</Prefix>", lifecyleRulePrefix))
+		xml = append(xml, fmt.Sprintf("<Status>%s</Status>", lifecyleRule.Status))
+		if ret := convertTransitionsToXML(lifecyleRule.Transitions, isObs); ret != "" {
+			xml = append(xml, ret)
+		}
+		if ret := convertExpirationToXML(lifecyleRule.Expiration); ret != "" {
+			xml = append(xml, ret)
+		}
+		if ret := convertNoncurrentVersionTransitionsToXML(lifecyleRule.NoncurrentVersionTransitions, isObs); ret != "" {
+			xml = append(xml, ret)
+		}
+		if ret := convertNoncurrentVersionExpirationToXML(lifecyleRule.NoncurrentVersionExpiration); ret != "" {
+			xml = append(xml, ret)
+		}
+		xml = append(xml, "</Rule>")
+	}
+	xml = append(xml, "</LifecycleConfiguration>")
+	data = strings.Join(xml, "")
+	if returnMd5 {
+		md5 = Base64Md5([]byte(data))
+	}
+	return
+}
+
+func converntFilterRulesToXML(filterRules []FilterRule, isObs bool) string {
+	if length := len(filterRules); length > 0 {
+		xml := make([]string, 0, length*4)
+		for _, filterRule := range filterRules {
+			xml = append(xml, "<FilterRule>")
+			if filterRule.Name != "" {
+				filterRuleName := XmlTranscoding(filterRule.Name)
+				xml = append(xml, fmt.Sprintf("<Name>%s</Name>", filterRuleName))
+			}
+			if filterRule.Value != "" {
+				filterRuleValue := XmlTranscoding(filterRule.Value)
+				xml = append(xml, fmt.Sprintf("<Value>%s</Value>", filterRuleValue))
+			}
+			xml = append(xml, "</FilterRule>")
+		}
+		if !isObs {
+			return fmt.Sprintf("<Filter><S3Key>%s</S3Key></Filter>", strings.Join(xml, ""))
+		}
+		return fmt.Sprintf("<Filter><Object>%s</Object></Filter>", strings.Join(xml, ""))
+	}
+	return ""
+}
+
+func converntEventsToXML(events []EventType, isObs bool) string {
+	if length := len(events); length > 0 {
+		xml := make([]string, 0, length)
+		if !isObs {
+			for _, event := range events {
+				xml = append(xml, fmt.Sprintf("<Event>%s%s</Event>", "s3:", event))
+			}
+		} else {
+			for _, event := range events {
+				xml = append(xml, fmt.Sprintf("<Event>%s</Event>", event))
+			}
+		}
+		return strings.Join(xml, "")
+	}
+	return ""
+}
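converntEventsToXML is the serialization counterpart of ParseStringToEventType above: against the S3-compatible API every event name is written with an "s3:" prefix, while the native OBS API takes the bare name, and the parser accepts both spellings so responses from either API map back to the same constant. A self-contained sketch of the prefixing rule (plain strings stand in for the SDK's EventType):

    package main

    import (
    	"fmt"
    	"strings"
    )

    // eventsToXML mirrors the isObs switch in converntEventsToXML.
    func eventsToXML(events []string, isObs bool) string {
    	xml := make([]string, 0, len(events))
    	for _, event := range events {
    		if !isObs {
    			xml = append(xml, fmt.Sprintf("<Event>s3:%s</Event>", event))
    		} else {
    			xml = append(xml, fmt.Sprintf("<Event>%s</Event>", event))
    		}
    	}
    	return strings.Join(xml, "")
    }

    func main() {
    	events := []string{"ObjectCreated:Put"}
    	fmt.Println(eventsToXML(events, false)) // <Event>s3:ObjectCreated:Put</Event>
    	fmt.Println(eventsToXML(events, true))  // <Event>ObjectCreated:Put</Event>
    }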
+
+func converntConfigureToXML(topicConfiguration TopicConfiguration, xmlElem string, isObs bool) string {
+	xml := make([]string, 0, 6)
+	xml = append(xml, xmlElem)
+	if topicConfiguration.ID != "" {
+		topicConfigurationID := XmlTranscoding(topicConfiguration.ID)
+		xml = append(xml, fmt.Sprintf("<Id>%s</Id>", topicConfigurationID))
+	}
+	topicConfigurationTopic := XmlTranscoding(topicConfiguration.Topic)
+	xml = append(xml, fmt.Sprintf("<Topic>%s</Topic>", topicConfigurationTopic))
+
+	if ret := converntEventsToXML(topicConfiguration.Events, isObs); ret != "" {
+		xml = append(xml, ret)
+	}
+	if ret := converntFilterRulesToXML(topicConfiguration.FilterRules, isObs); ret != "" {
+		xml = append(xml, ret)
+	}
+	// derive the closing tag from the opening one: "<X>" becomes "</X>"
+	tempElem := xmlElem[0:1] + "/" + xmlElem[1:]
+	xml = append(xml, tempElem)
+	return strings.Join(xml, "")
+}
+
+// ConverntObsRestoreToXml converts RestoreObjectInput value to XML data and returns it
+func ConverntObsRestoreToXml(restoreObjectInput RestoreObjectInput) string {
+	xml := make([]string, 0, 2)
+	xml = append(xml, fmt.Sprintf("<RestoreRequest><Days>%d</Days>", restoreObjectInput.Days))
+	if restoreObjectInput.Tier != "Bulk" {
+		xml = append(xml, fmt.Sprintf("<RestoreJob><Tier>%s</Tier></RestoreJob>", restoreObjectInput.Tier))
+	}
+	xml = append(xml, "</RestoreRequest>")
+	data := strings.Join(xml, "")
+	return data
+}
+
+// ConvertNotificationToXml converts BucketNotification value to XML data and returns it
+func ConvertNotificationToXml(input BucketNotification, returnMd5 bool, isObs bool) (data string, md5 string) {
+	xml := make([]string, 0, 2+len(input.TopicConfigurations)*6)
+	xml = append(xml, "<NotificationConfiguration>")
+	for _, topicConfiguration := range input.TopicConfigurations {
+		ret := converntConfigureToXML(topicConfiguration, "<TopicConfiguration>", isObs)
+		xml = append(xml, ret)
+	}
+	xml = append(xml, "</NotificationConfiguration>")
+	data = strings.Join(xml, "")
+	if returnMd5 {
+		md5 = Base64Md5([]byte(data))
+	}
+	return
+}
+
+// ConvertCompleteMultipartUploadInputToXml converts CompleteMultipartUploadInput value to XML data and returns it
+func ConvertCompleteMultipartUploadInputToXml(input CompleteMultipartUploadInput, returnMd5 bool) (data string, md5 string) {
+	xml := make([]string, 0, 2+len(input.Parts)*4)
+	xml = append(xml, "<CompleteMultipartUpload>")
+	for _, part := range input.Parts {
+		xml = append(xml, "<Part>")
+		xml = append(xml, fmt.Sprintf("<PartNumber>%d</PartNumber>", part.PartNumber))
+		xml = append(xml, fmt.Sprintf("<ETag>%s</ETag>", part.ETag))
+		xml = append(xml, "</Part>")
+	}
+	xml = append(xml, "</CompleteMultipartUpload>")
+	data = strings.Join(xml, "")
+	if returnMd5 {
+		md5 = Base64Md5([]byte(data))
+	}
+	return
+}
+
+func parseSseHeader(responseHeaders map[string][]string) (sseHeader ISseHeader) {
+	if ret, ok := responseHeaders[HEADER_SSEC_ENCRYPTION]; ok {
+		sseCHeader := SseCHeader{Encryption: ret[0]}
+		if ret, ok = responseHeaders[HEADER_SSEC_KEY_MD5]; ok {
+			sseCHeader.KeyMD5 = ret[0]
+		}
+		sseHeader = sseCHeader
+	} else if ret, ok := responseHeaders[HEADER_SSEKMS_ENCRYPTION]; ok {
+		sseKmsHeader := SseKmsHeader{Encryption: ret[0]}
+		if ret, ok = responseHeaders[HEADER_SSEKMS_KEY]; ok {
+			sseKmsHeader.Key = ret[0]
+		} else if ret, ok = responseHeaders[HEADER_SSEKMS_ENCRYPT_KEY_OBS]; ok {
+			sseKmsHeader.Key = ret[0]
+		}
+		sseHeader = sseKmsHeader
+	}
+	return
+}
+
+func parseCorsHeader(output BaseModel) (AllowOrigin, AllowHeader, AllowMethod, ExposeHeader string, MaxAgeSeconds int) {
+	if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_ALLOW_ORIGIN]; ok {
+		AllowOrigin = ret[0]
+	}
+	if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_ALLOW_HEADERS]; ok {
+		AllowHeader = ret[0]
+	}
+	if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_MAX_AGE]; ok {
+		MaxAgeSeconds = StringToInt(ret[0], 0)
+	}
+	if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_ALLOW_METHODS]; ok {
+		AllowMethod = ret[0]
+	}
+	if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_EXPOSE_HEADERS]; ok {
+		ExposeHeader = ret[0]
+	}
+	return
+}
+
+func parseUnCommonHeader(output *GetObjectMetadataOutput) {
+	if ret, ok := 
output.ResponseHeaders[HEADER_VERSION_ID]; ok { + output.VersionId = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_WEBSITE_REDIRECT_LOCATION]; ok { + output.WebsiteRedirectLocation = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_EXPIRATION]; ok { + output.Expiration = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_RESTORE]; ok { + output.Restore = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_OBJECT_TYPE]; ok { + output.ObjectType = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_NEXT_APPEND_POSITION]; ok { + output.NextAppendPosition = ret[0] + } +} + +// ParseGetObjectMetadataOutput sets GetObjectMetadataOutput field values with response headers +func ParseGetObjectMetadataOutput(output *GetObjectMetadataOutput) { + output.AllowOrigin, output.AllowHeader, output.AllowMethod, output.ExposeHeader, output.MaxAgeSeconds = parseCorsHeader(output.BaseModel) + parseUnCommonHeader(output) + if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS2]; ok { + output.StorageClass = ParseStringToStorageClassType(ret[0]) + } + if ret, ok := output.ResponseHeaders[HEADER_ETAG]; ok { + output.ETag = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_CONTENT_TYPE]; ok { + output.ContentType = ret[0] + } + + output.SseHeader = parseSseHeader(output.ResponseHeaders) + if ret, ok := output.ResponseHeaders[HEADER_LASTMODIFIED]; ok { + ret, err := time.Parse(time.RFC1123, ret[0]) + if err == nil { + output.LastModified = ret + } + } + if ret, ok := output.ResponseHeaders[HEADER_CONTENT_LENGTH]; ok { + output.ContentLength = StringToInt64(ret[0], 0) + } + + output.Metadata = make(map[string]string) + + for key, value := range output.ResponseHeaders { + if strings.HasPrefix(key, PREFIX_META) { + _key := key[len(PREFIX_META):] + output.ResponseHeaders[_key] = value + output.Metadata[_key] = value[0] + delete(output.ResponseHeaders, key) + } + } + +} + +// ParseCopyObjectOutput sets CopyObjectOutput field values with response headers +func ParseCopyObjectOutput(output *CopyObjectOutput) { + if ret, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok { + output.VersionId = ret[0] + } + output.SseHeader = parseSseHeader(output.ResponseHeaders) + if ret, ok := output.ResponseHeaders[HEADER_COPY_SOURCE_VERSION_ID]; ok { + output.CopySourceVersionId = ret[0] + } +} + +// ParsePutObjectOutput sets PutObjectOutput field values with response headers +func ParsePutObjectOutput(output *PutObjectOutput) { + if ret, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok { + output.VersionId = ret[0] + } + output.SseHeader = parseSseHeader(output.ResponseHeaders) + if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS2]; ok { + output.StorageClass = ParseStringToStorageClassType(ret[0]) + } + if ret, ok := output.ResponseHeaders[HEADER_ETAG]; ok { + output.ETag = ret[0] + } +} + +// ParseInitiateMultipartUploadOutput sets InitiateMultipartUploadOutput field values with response headers +func ParseInitiateMultipartUploadOutput(output *InitiateMultipartUploadOutput) { + output.SseHeader = parseSseHeader(output.ResponseHeaders) +} + +// ParseUploadPartOutput sets UploadPartOutput field values with response headers +func ParseUploadPartOutput(output *UploadPartOutput) { + output.SseHeader = parseSseHeader(output.ResponseHeaders) + if ret, ok := output.ResponseHeaders[HEADER_ETAG]; ok { + output.ETag = ret[0] + } +} + +// ParseCompleteMultipartUploadOutput sets CompleteMultipartUploadOutput field values with response headers +func ParseCompleteMultipartUploadOutput(output 
*CompleteMultipartUploadOutput) { + output.SseHeader = parseSseHeader(output.ResponseHeaders) + if ret, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok { + output.VersionId = ret[0] + } +} + +// ParseCopyPartOutput sets CopyPartOutput field values with response headers +func ParseCopyPartOutput(output *CopyPartOutput) { + output.SseHeader = parseSseHeader(output.ResponseHeaders) +} + +// ParseGetBucketMetadataOutput sets GetBucketMetadataOutput field values with response headers +func ParseGetBucketMetadataOutput(output *GetBucketMetadataOutput) { + output.AllowOrigin, output.AllowHeader, output.AllowMethod, output.ExposeHeader, output.MaxAgeSeconds = parseCorsHeader(output.BaseModel) + if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS]; ok { + output.StorageClass = ParseStringToStorageClassType(ret[0]) + } else if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS2]; ok { + output.StorageClass = ParseStringToStorageClassType(ret[0]) + } + if ret, ok := output.ResponseHeaders[HEADER_VERSION_OBS]; ok { + output.Version = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok { + output.Location = ret[0] + } else if ret, ok := output.ResponseHeaders[HEADER_BUCKET_LOCATION_OBS]; ok { + output.Location = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_EPID_HEADERS]; ok { + output.Epid = ret[0] + } +} + +func parseContentHeader(output *SetObjectMetadataOutput) { + if ret, ok := output.ResponseHeaders[HEADER_CONTENT_DISPOSITION]; ok { + output.ContentDisposition = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_CONTENT_ENCODING]; ok { + output.ContentEncoding = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_CONTENT_LANGUAGE]; ok { + output.ContentLanguage = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_CONTENT_TYPE]; ok { + output.ContentType = ret[0] + } +} + +// ParseSetObjectMetadataOutput sets SetObjectMetadataOutput field values with response headers +func ParseSetObjectMetadataOutput(output *SetObjectMetadataOutput) { + if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS]; ok { + output.StorageClass = ParseStringToStorageClassType(ret[0]) + } else if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS2]; ok { + output.StorageClass = ParseStringToStorageClassType(ret[0]) + } + if ret, ok := output.ResponseHeaders[HEADER_METADATA_DIRECTIVE]; ok { + output.MetadataDirective = MetadataDirectiveType(ret[0]) + } + if ret, ok := output.ResponseHeaders[HEADER_CACHE_CONTROL]; ok { + output.CacheControl = ret[0] + } + parseContentHeader(output) + if ret, ok := output.ResponseHeaders[HEADER_EXPIRES]; ok { + output.Expires = ret[0] + } + if ret, ok := output.ResponseHeaders[HEADER_WEBSITE_REDIRECT_LOCATION]; ok { + output.WebsiteRedirectLocation = ret[0] + } + output.Metadata = make(map[string]string) + + for key, value := range output.ResponseHeaders { + if strings.HasPrefix(key, PREFIX_META) { + _key := key[len(PREFIX_META):] + output.ResponseHeaders[_key] = value + output.Metadata[_key] = value[0] + delete(output.ResponseHeaders, key) + } + } +} + +// ParseDeleteObjectOutput sets DeleteObjectOutput field values with response headers +func ParseDeleteObjectOutput(output *DeleteObjectOutput) { + if versionID, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok { + output.VersionId = versionID[0] + } + + if deleteMarker, ok := output.ResponseHeaders[HEADER_DELETE_MARKER]; ok { + output.DeleteMarker = deleteMarker[0] == "true" + } +} + +// ParseGetObjectOutput sets GetObjectOutput field values with response headers 
+func ParseGetObjectOutput(output *GetObjectOutput) {
+	ParseGetObjectMetadataOutput(&output.GetObjectMetadataOutput)
+	if ret, ok := output.ResponseHeaders[HEADER_DELETE_MARKER]; ok {
+		output.DeleteMarker = ret[0] == "true"
+	}
+	if ret, ok := output.ResponseHeaders[HEADER_CACHE_CONTROL]; ok {
+		output.CacheControl = ret[0]
+	}
+	if ret, ok := output.ResponseHeaders[HEADER_CONTENT_DISPOSITION]; ok {
+		output.ContentDisposition = ret[0]
+	}
+	if ret, ok := output.ResponseHeaders[HEADER_CONTENT_ENCODING]; ok {
+		output.ContentEncoding = ret[0]
+	}
+	if ret, ok := output.ResponseHeaders[HEADER_CONTENT_LANGUAGE]; ok {
+		output.ContentLanguage = ret[0]
+	}
+	if ret, ok := output.ResponseHeaders[HEADER_EXPIRES]; ok {
+		output.Expires = ret[0]
+	}
+}
+
+// ConvertRequestToIoReaderV2 converts req to XML data
+func ConvertRequestToIoReaderV2(req interface{}) (io.Reader, string, error) {
+	data, err := TransToXml(req)
+	if err == nil {
+		if isDebugLogEnabled() {
+			doLog(LEVEL_DEBUG, "Do http request with data: %s", string(data))
+		}
+		return bytes.NewReader(data), Base64Md5(data), nil
+	}
+	return nil, "", err
+}
+
+// ConvertRequestToIoReader converts req to XML data
+func ConvertRequestToIoReader(req interface{}) (io.Reader, error) {
+	body, err := TransToXml(req)
+	if err == nil {
+		if isDebugLogEnabled() {
+			doLog(LEVEL_DEBUG, "Do http request with data: %s", string(body))
+		}
+		return bytes.NewReader(body), nil
+	}
+	return nil, err
+}
+
+// ParseResponseToBaseModel gets response from OBS
+func ParseResponseToBaseModel(resp *http.Response, baseModel IBaseModel, xmlResult bool, isObs bool) (err error) {
+	readCloser, ok := baseModel.(IReadCloser)
+	if !ok {
+		defer func() {
+			errMsg := resp.Body.Close()
+			if errMsg != nil {
+				doLog(LEVEL_WARN, "Failed to close response body")
+			}
+		}()
+		// read into the named return instead of shadowing it with :=,
+		// so read and unmarshal failures propagate to the caller
+		var body []byte
+		body, err = ioutil.ReadAll(resp.Body)
+		if err == nil && len(body) > 0 {
+			if xmlResult {
+				err = ParseXml(body, baseModel)
+			} else {
+				s := reflect.TypeOf(baseModel).Elem()
+				if s.Name() == "GetBucketPolicyOutput" {
+					for i := 0; i < s.NumField(); i++ {
+						if s.Field(i).Tag == "json:\"body\"" {
+							reflect.ValueOf(baseModel).Elem().FieldByName(s.Field(i).Name).SetString(string(body))
+							break
+						}
+					}
+				} else {
+					err = parseJSON(body, baseModel)
+				}
+			}
+			if err != nil {
+				doLog(LEVEL_ERROR, "Unmarshal error: %v", err)
+			}
+		}
+	} else {
+		readCloser.setReadCloser(resp.Body)
+	}
+
+	baseModel.setStatusCode(resp.StatusCode)
+	responseHeaders := cleanHeaderPrefix(resp.Header)
+	baseModel.setResponseHeaders(responseHeaders)
+	if values, ok := responseHeaders[HEADER_REQUEST_ID]; ok {
+		baseModel.setRequestID(values[0])
+	}
+	return
+}
+
+// ParseResponseToObsError gets obsError from OBS
+func ParseResponseToObsError(resp *http.Response, isObs bool) error {
+	isJson := false
+	if contentType, ok := resp.Header[HEADER_CONTENT_TYPE_CAML]; ok {
+		jsonType := mimeTypes["json"]
+		isJson = contentType[0] == jsonType
+	}
+	obsError := ObsError{}
+	respError := ParseResponseToBaseModel(resp, &obsError, !isJson, isObs)
+	if respError != nil {
+		doLog(LEVEL_WARN, "Parse response to BaseModel with error: %v", respError)
+	}
+	obsError.Status = resp.Status
+	return obsError
+}
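ParseResponseToObsError returns the ObsError by value, so callers can separate a service-side failure (the server answered with an error body) from a transport failure with a type check. A sketch of that calling pattern, with ObsError trimmed to a stand-in so it runs on its own (the real type, defined in error.go below, also embeds BaseModel and carries XML field tags):

    package main

    import (
    	"errors"
    	"fmt"
    )

    // Trimmed stand-in for the SDK's ObsError (illustration only).
    type ObsError struct {
    	Status  string
    	Code    string
    	Message string
    }

    func (e ObsError) Error() string {
    	return fmt.Sprintf("obs: service returned error: Status=%s, Code=%s, Message=%s", e.Status, e.Code, e.Message)
    }

    // classify shows the intended split: an ObsError means the service
    // answered with a non-2xx body; anything else is a transport failure.
    func classify(err error) {
    	var obsErr ObsError
    	if errors.As(err, &obsErr) {
    		fmt.Println("service-side failure, HTTP status:", obsErr.Status)
    		return
    	}
    	fmt.Println("transport or client-side failure:", err)
    }

    func main() {
    	classify(ObsError{Status: "403 Forbidden", Code: "AccessDenied", Message: "Access Denied"})
    	classify(errors.New("dial tcp: connection refused"))
    }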
+
+// convertFetchPolicyToJSON converts SetBucketFetchPolicyInput into json format
+func convertFetchPolicyToJSON(input SetBucketFetchPolicyInput) (data string, err error) {
+	fetch := map[string]SetBucketFetchPolicyInput{"fetch": input}
+	jsonData, err := json.Marshal(fetch)
+	if err != nil {
+		return "", err
+	}
+	data = string(jsonData)
+	return
+}
+
+// convertFetchJobToJSON converts SetBucketFetchJobInput into json format
+func convertFetchJobToJSON(input SetBucketFetchJobInput) (data string, err error) {
+	objectHeaders := make(map[string]string)
+	for key, value := range input.ObjectHeaders {
+		if value != "" {
+			_key := strings.ToLower(key)
+			if !strings.HasPrefix(key, HEADER_PREFIX_OBS) {
+				_key = HEADER_PREFIX_META_OBS + _key
+			}
+			objectHeaders[_key] = value
+		}
+	}
+	input.ObjectHeaders = objectHeaders
+	jsonData, err := json.Marshal(input)
+	if err != nil {
+		return "", err
+	}
+	data = string(jsonData)
+	return
+}
diff --git a/modules/obs/error.go b/modules/obs/error.go
new file mode 100755
index 000000000..63cb5bb03
--- /dev/null
+++ b/modules/obs/error.go
@@ -0,0 +1,35 @@
+// Copyright 2019 Huawei Technologies Co.,Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+//nolint:golint, unused
+package obs
+
+import (
+	"encoding/xml"
+	"fmt"
+)
+
+// ObsError defines error response from OBS
+type ObsError struct {
+	BaseModel
+	Status   string
+	XMLName  xml.Name `xml:"Error"`
+	Code     string   `xml:"Code" json:"code"`
+	Message  string   `xml:"Message" json:"message"`
+	Resource string   `xml:"Resource"`
+	HostId   string   `xml:"HostId"`
+}
+
+func (err ObsError) Error() string {
+	return fmt.Sprintf("obs: service returned error: Status=%s, Code=%s, Message=%s, RequestId=%s",
+		err.Status, err.Code, err.Message, err.RequestId)
+}
diff --git a/modules/obs/extension.go b/modules/obs/extension.go
new file mode 100755
index 000000000..bbf33c56b
--- /dev/null
+++ b/modules/obs/extension.go
@@ -0,0 +1,37 @@
+// Copyright 2019 Huawei Technologies Co.,Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+ +//nolint:golint, unused +package obs + +import ( + "fmt" + "strings" +) + +type extensionOptions interface{} +type extensionHeaders func(headers map[string][]string, isObs bool) error + +func setHeaderPrefix(key string, value string) extensionHeaders { + return func(headers map[string][]string, isObs bool) error { + if strings.TrimSpace(value) == "" { + return fmt.Errorf("set header %s with empty value", key) + } + setHeaders(headers, key, []string{value}, isObs) + return nil + } +} + +// WithReqPaymentHeader sets header for requester-pays +func WithReqPaymentHeader(requester PayerType) extensionHeaders { + return setHeaderPrefix(REQUEST_PAYER, string(requester)) +} diff --git a/modules/obs/http.go b/modules/obs/http.go new file mode 100755 index 000000000..e305c14b5 --- /dev/null +++ b/modules/obs/http.go @@ -0,0 +1,566 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +package obs + +import ( + "bytes" + "errors" + "fmt" + "io" + "math/rand" + "net" + "net/http" + "net/url" + "os" + "strings" + "time" +) + +func prepareHeaders(headers map[string][]string, meta bool, isObs bool) map[string][]string { + _headers := make(map[string][]string, len(headers)) + if headers != nil { + for key, value := range headers { + key = strings.TrimSpace(key) + if key == "" { + continue + } + _key := strings.ToLower(key) + if _, ok := allowedRequestHTTPHeaderMetadataNames[_key]; !ok && !strings.HasPrefix(key, HEADER_PREFIX) && !strings.HasPrefix(key, HEADER_PREFIX_OBS) { + if !meta { + continue + } + if !isObs { + _key = HEADER_PREFIX_META + _key + } else { + _key = HEADER_PREFIX_META_OBS + _key + } + } else { + _key = key + } + _headers[_key] = value + } + } + return _headers +} + +func (obsClient ObsClient) doActionWithoutBucket(action, method string, input ISerializable, output IBaseModel, extensions []extensionOptions) error { + return obsClient.doAction(action, method, "", "", input, output, true, true, extensions) +} + +func (obsClient ObsClient) doActionWithBucketV2(action, method, bucketName string, input ISerializable, output IBaseModel, extensions []extensionOptions) error { + if strings.TrimSpace(bucketName) == "" && !obsClient.conf.cname { + return errors.New("Bucket is empty") + } + return obsClient.doAction(action, method, bucketName, "", input, output, false, true, extensions) +} + +func (obsClient ObsClient) doActionWithBucket(action, method, bucketName string, input ISerializable, output IBaseModel, extensions []extensionOptions) error { + if strings.TrimSpace(bucketName) == "" && !obsClient.conf.cname { + return errors.New("Bucket is empty") + } + return obsClient.doAction(action, method, bucketName, "", input, output, true, true, extensions) +} + +func (obsClient ObsClient) doActionWithBucketAndKey(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, extensions []extensionOptions) error { + return obsClient._doActionWithBucketAndKey(action, method, bucketName, objectKey, input, output, true, extensions) 
+} + +func (obsClient ObsClient) doActionWithBucketAndKeyV2(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, extensions []extensionOptions) error { + if strings.TrimSpace(bucketName) == "" && !obsClient.conf.cname { + return errors.New("Bucket is empty") + } + if strings.TrimSpace(objectKey) == "" { + return errors.New("Key is empty") + } + return obsClient.doAction(action, method, bucketName, objectKey, input, output, false, true, extensions) +} + +func (obsClient ObsClient) doActionWithBucketAndKeyUnRepeatable(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, extensions []extensionOptions) error { + return obsClient._doActionWithBucketAndKey(action, method, bucketName, objectKey, input, output, false, extensions) +} + +func (obsClient ObsClient) _doActionWithBucketAndKey(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, repeatable bool, extensions []extensionOptions) error { + if strings.TrimSpace(bucketName) == "" && !obsClient.conf.cname { + return errors.New("Bucket is empty") + } + if strings.TrimSpace(objectKey) == "" { + return errors.New("Key is empty") + } + return obsClient.doAction(action, method, bucketName, objectKey, input, output, true, repeatable, extensions) +} + +func (obsClient ObsClient) doAction(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, xmlResult bool, repeatable bool, extensions []extensionOptions) error { + + var resp *http.Response + var respError error + doLog(LEVEL_INFO, "Enter method %s...", action) + start := GetCurrentTimestamp() + + params, headers, data, err := input.trans(obsClient.conf.signature == SignatureObs) + if err != nil { + return err + } + + if params == nil { + params = make(map[string]string) + } + + if headers == nil { + headers = make(map[string][]string) + } + + for _, extension := range extensions { + if extensionHeader, ok := extension.(extensionHeaders); ok { + _err := extensionHeader(headers, obsClient.conf.signature == SignatureObs) + if _err != nil { + doLog(LEVEL_WARN, fmt.Sprintf("set header with error: %v", _err)) + } + } else { + doLog(LEVEL_WARN, "Unsupported extensionOptions") + } + } + + switch method { + case HTTP_GET: + resp, respError = obsClient.doHTTPGet(bucketName, objectKey, params, headers, data, repeatable) + case HTTP_POST: + resp, respError = obsClient.doHTTPPost(bucketName, objectKey, params, headers, data, repeatable) + case HTTP_PUT: + resp, respError = obsClient.doHTTPPut(bucketName, objectKey, params, headers, data, repeatable) + case HTTP_DELETE: + resp, respError = obsClient.doHTTPDelete(bucketName, objectKey, params, headers, data, repeatable) + case HTTP_HEAD: + resp, respError = obsClient.doHTTPHead(bucketName, objectKey, params, headers, data, repeatable) + case HTTP_OPTIONS: + resp, respError = obsClient.doHTTPOptions(bucketName, objectKey, params, headers, data, repeatable) + default: + respError = errors.New("Unexpect http method error") + } + if respError == nil && output != nil { + respError = ParseResponseToBaseModel(resp, output, xmlResult, obsClient.conf.signature == SignatureObs) + if respError != nil { + doLog(LEVEL_WARN, "Parse response to BaseModel with error: %v", respError) + } + } else { + doLog(LEVEL_WARN, "Do http request with error: %v", respError) + } + + if isDebugLogEnabled() { + doLog(LEVEL_DEBUG, "End method %s, obsclient cost %d ms", action, (GetCurrentTimestamp() - start)) + } + + return respError +} + +func (obsClient ObsClient) 
doHTTPGet(bucketName, objectKey string, params map[string]string,
+	headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
+	return obsClient.doHTTP(HTTP_GET, bucketName, objectKey, params, prepareHeaders(headers, false, obsClient.conf.signature == SignatureObs), data, repeatable)
+}
+
+func (obsClient ObsClient) doHTTPHead(bucketName, objectKey string, params map[string]string,
+	headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
+	return obsClient.doHTTP(HTTP_HEAD, bucketName, objectKey, params, prepareHeaders(headers, false, obsClient.conf.signature == SignatureObs), data, repeatable)
+}
+
+func (obsClient ObsClient) doHTTPOptions(bucketName, objectKey string, params map[string]string,
+	headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
+	return obsClient.doHTTP(HTTP_OPTIONS, bucketName, objectKey, params, prepareHeaders(headers, false, obsClient.conf.signature == SignatureObs), data, repeatable)
+}
+
+func (obsClient ObsClient) doHTTPDelete(bucketName, objectKey string, params map[string]string,
+	headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
+	return obsClient.doHTTP(HTTP_DELETE, bucketName, objectKey, params, prepareHeaders(headers, false, obsClient.conf.signature == SignatureObs), data, repeatable)
+}
+
+func (obsClient ObsClient) doHTTPPut(bucketName, objectKey string, params map[string]string,
+	headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
+	return obsClient.doHTTP(HTTP_PUT, bucketName, objectKey, params, prepareHeaders(headers, true, obsClient.conf.signature == SignatureObs), data, repeatable)
+}
+
+func (obsClient ObsClient) doHTTPPost(bucketName, objectKey string, params map[string]string,
+	headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
+	return obsClient.doHTTP(HTTP_POST, bucketName, objectKey, params, prepareHeaders(headers, true, obsClient.conf.signature == SignatureObs), data, repeatable)
+}
+
+func (obsClient ObsClient) doHTTPWithSignedURL(action, method string, signedURL string, actualSignedRequestHeaders http.Header, data io.Reader, output IBaseModel, xmlResult bool) (respError error) {
+	req, err := http.NewRequest(method, signedURL, data)
+	if err != nil {
+		return err
+	}
+	if obsClient.conf.ctx != nil {
+		req = req.WithContext(obsClient.conf.ctx)
+	}
+	var resp *http.Response
+
+	var isSecurityToken bool
+	var securityToken string
+	var query []string
+	urlParts := strings.Split(signedURL, "?")
+	if len(urlParts) > 1 {
+		query = strings.Split(urlParts[1], "&")
+		for _, value := range query {
+			if strings.HasPrefix(value, HEADER_STS_TOKEN_AMZ+"=") || strings.HasPrefix(value, HEADER_STS_TOKEN_OBS+"=") {
+				// HEADER_STS_TOKEN_AMZ and HEADER_STS_TOKEN_OBS have the same length,
+				// so one offset works for both prefixes
+				if value[len(HEADER_STS_TOKEN_AMZ)+1:] != "" {
+					securityToken = value[len(HEADER_STS_TOKEN_AMZ)+1:]
+					isSecurityToken = true
+				}
+			}
+		}
+	}
+	logSignedURL := signedURL
+	if isSecurityToken {
+		logSignedURL = strings.Replace(logSignedURL, securityToken, "******", -1)
+	}
+	doLog(LEVEL_INFO, "Do %s with signedUrl %s...", action, logSignedURL)
+
+	req.Header = actualSignedRequestHeaders
+	if value, ok := req.Header[HEADER_HOST_CAMEL]; ok {
+		req.Host = value[0]
+		delete(req.Header, HEADER_HOST_CAMEL)
+	} else if value, ok := req.Header[HEADER_HOST]; ok {
+		req.Host = value[0]
+		delete(req.Header, HEADER_HOST)
+	}
+
+	if value, ok := req.Header[HEADER_CONTENT_LENGTH_CAMEL]; ok {
+		req.ContentLength = StringToInt64(value[0], -1)
+		
delete(req.Header, HEADER_CONTENT_LENGTH_CAMEL) + } else if value, ok := req.Header[HEADER_CONTENT_LENGTH]; ok { + req.ContentLength = StringToInt64(value[0], -1) + delete(req.Header, HEADER_CONTENT_LENGTH) + } + + req.Header[HEADER_USER_AGENT_CAMEL] = []string{USER_AGENT} + start := GetCurrentTimestamp() + resp, err = obsClient.httpClient.Do(req) + if isInfoLogEnabled() { + doLog(LEVEL_INFO, "Do http request cost %d ms", (GetCurrentTimestamp() - start)) + } + + var msg interface{} + if err != nil { + respError = err + resp = nil + } else { + doLog(LEVEL_DEBUG, "Response headers: %v", resp.Header) + if resp.StatusCode >= 300 { + respError = ParseResponseToObsError(resp, obsClient.conf.signature == SignatureObs) + msg = resp.Status + resp = nil + } else { + if output != nil { + respError = ParseResponseToBaseModel(resp, output, xmlResult, obsClient.conf.signature == SignatureObs) + } + if respError != nil { + doLog(LEVEL_WARN, "Parse response to BaseModel with error: %v", respError) + } + } + } + + if msg != nil { + doLog(LEVEL_ERROR, "Failed to send request with reason:%v", msg) + } + + if isDebugLogEnabled() { + doLog(LEVEL_DEBUG, "End method %s, obsclient cost %d ms", action, (GetCurrentTimestamp() - start)) + } + + return +} + +func (obsClient ObsClient) doHTTP(method, bucketName, objectKey string, params map[string]string, + headers map[string][]string, data interface{}, repeatable bool) (resp *http.Response, respError error) { + + bucketName = strings.TrimSpace(bucketName) + + method = strings.ToUpper(method) + + var redirectURL string + var requestURL string + maxRetryCount := obsClient.conf.maxRetryCount + maxRedirectCount := obsClient.conf.maxRedirectCount + + var _data io.Reader + if data != nil { + if dataStr, ok := data.(string); ok { + doLog(LEVEL_DEBUG, "Do http request with string: %s", dataStr) + headers["Content-Length"] = []string{IntToString(len(dataStr))} + _data = strings.NewReader(dataStr) + } else if dataByte, ok := data.([]byte); ok { + doLog(LEVEL_DEBUG, "Do http request with byte array") + headers["Content-Length"] = []string{IntToString(len(dataByte))} + _data = bytes.NewReader(dataByte) + } else if dataReader, ok := data.(io.Reader); ok { + _data = dataReader + } else { + doLog(LEVEL_WARN, "Data is not a valid io.Reader") + return nil, errors.New("Data is not a valid io.Reader") + } + } + + var lastRequest *http.Request + redirectFlag := false + for i, redirectCount := 0, 0; i <= maxRetryCount; i++ { + if redirectURL != "" { + if !redirectFlag { + parsedRedirectURL, err := url.Parse(redirectURL) + if err != nil { + return nil, err + } + requestURL, err = obsClient.doAuth(method, bucketName, objectKey, params, headers, parsedRedirectURL.Host) + if err != nil { + return nil, err + } + if parsedRequestURL, err := url.Parse(requestURL); err != nil { + return nil, err + } else if parsedRequestURL.RawQuery != "" && parsedRedirectURL.RawQuery == "" { + redirectURL += "?" 
+ parsedRequestURL.RawQuery
+				}
+			}
+			requestURL = redirectURL
+		} else {
+			var err error
+			requestURL, err = obsClient.doAuth(method, bucketName, objectKey, params, headers, "")
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		req, err := http.NewRequest(method, requestURL, _data)
+		// check the construction error before touching req: on failure req is
+		// nil and calling WithContext on it would panic
+		if err != nil {
+			return nil, err
+		}
+		if obsClient.conf.ctx != nil {
+			req = req.WithContext(obsClient.conf.ctx)
+		}
+		doLog(LEVEL_DEBUG, "Do request with url [%s] and method [%s]", requestURL, method)
+
+		if isDebugLogEnabled() {
+			auth := headers[HEADER_AUTH_CAMEL]
+			delete(headers, HEADER_AUTH_CAMEL)
+
+			var isSecurityToken bool
+			var securityToken []string
+			if securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_AMZ]; isSecurityToken {
+				headers[HEADER_STS_TOKEN_AMZ] = []string{"******"}
+			} else if securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_OBS]; isSecurityToken {
+				headers[HEADER_STS_TOKEN_OBS] = []string{"******"}
+			}
+			doLog(LEVEL_DEBUG, "Request headers: %v", headers)
+			headers[HEADER_AUTH_CAMEL] = auth
+			if isSecurityToken {
+				if obsClient.conf.signature == SignatureObs {
+					headers[HEADER_STS_TOKEN_OBS] = securityToken
+				} else {
+					headers[HEADER_STS_TOKEN_AMZ] = securityToken
+				}
+			}
+		}
+
+		for key, value := range headers {
+			if key == HEADER_HOST_CAMEL {
+				req.Host = value[0]
+				delete(headers, key)
+			} else if key == HEADER_CONTENT_LENGTH_CAMEL {
+				req.ContentLength = StringToInt64(value[0], -1)
+				delete(headers, key)
+			} else {
+				req.Header[key] = value
+			}
+		}
+
+		lastRequest = req
+
+		req.Header[HEADER_USER_AGENT_CAMEL] = []string{USER_AGENT}
+
+		if lastRequest != nil {
+			req.Host = lastRequest.Host
+			req.ContentLength = lastRequest.ContentLength
+		}
+
+		start := GetCurrentTimestamp()
+		resp, err = obsClient.httpClient.Do(req)
+		if isInfoLogEnabled() {
+			doLog(LEVEL_INFO, "Do http request cost %d ms", (GetCurrentTimestamp() - start))
+		}
+
+		var msg interface{}
+		if err != nil {
+			msg = err
+			respError = err
+			resp = nil
+			if !repeatable {
+				break
+			}
+		} else {
+			doLog(LEVEL_DEBUG, "Response headers: %v", resp.Header)
+			if resp.StatusCode < 300 {
+				break
+			} else if !repeatable || (resp.StatusCode >= 400 && resp.StatusCode < 500) || resp.StatusCode == 304 {
+				respError = ParseResponseToObsError(resp, obsClient.conf.signature == SignatureObs)
+				resp = nil
+				break
+			} else if resp.StatusCode >= 300 && resp.StatusCode < 400 {
+				if location := resp.Header.Get(HEADER_LOCATION_CAMEL); location != "" && redirectCount < maxRedirectCount {
+					redirectURL = location
+					doLog(LEVEL_WARN, "Redirect request to %s", redirectURL)
+					msg = resp.Status
+					maxRetryCount++
+					redirectCount++
+					if resp.StatusCode == 302 && method == HTTP_GET {
+						redirectFlag = true
+					} else {
+						redirectFlag = false
+					}
+				} else {
+					respError = ParseResponseToObsError(resp, obsClient.conf.signature == SignatureObs)
+					resp = nil
+					break
+				}
+			} else {
+				msg = resp.Status
+			}
+		}
+		if i != maxRetryCount {
+			if resp != nil {
+				_err := resp.Body.Close()
+				if _err != nil {
+					doLog(LEVEL_WARN, "Failed to close resp body")
+				}
+				resp = nil
+			}
+			if _, ok := headers[HEADER_AUTH_CAMEL]; ok {
+				delete(headers, HEADER_AUTH_CAMEL)
+			}
+			doLog(LEVEL_WARN, "Failed to send request with reason:%v, will try again", msg)
+			if r, ok := _data.(*strings.Reader); ok {
+				_, err := r.Seek(0, 0)
+				if err != nil {
+					return nil, err
+				}
+			} else if r, ok := _data.(*bytes.Reader); ok {
+				_, err := r.Seek(0, 0)
+				if err != nil {
+					return nil, err
+				}
+			} else if r, ok := _data.(*fileReaderWrapper); ok {
+				fd, err := os.Open(r.filePath)
+				if 
err != nil { + return nil, err + } + defer func() { + errMsg := fd.Close() + if errMsg != nil { + doLog(LEVEL_WARN, "Failed to close with reason: %v", errMsg) + } + }() + fileReaderWrapper := &fileReaderWrapper{filePath: r.filePath} + fileReaderWrapper.mark = r.mark + fileReaderWrapper.reader = fd + fileReaderWrapper.totalCount = r.totalCount + _data = fileReaderWrapper + _, err = fd.Seek(r.mark, 0) + if err != nil { + return nil, err + } + } else if r, ok := _data.(*readerWrapper); ok { + _, err := r.seek(0, 0) + if err != nil { + return nil, err + } + } + time.Sleep(time.Duration(float64(i+2) * rand.Float64() * float64(time.Second))) + } else { + doLog(LEVEL_ERROR, "Failed to send request with reason:%v", msg) + if resp != nil { + respError = ParseResponseToObsError(resp, obsClient.conf.signature == SignatureObs) + resp = nil + } + } + } + return +} + +type connDelegate struct { + conn net.Conn + socketTimeout time.Duration + finalTimeout time.Duration +} + +func getConnDelegate(conn net.Conn, socketTimeout int, finalTimeout int) *connDelegate { + return &connDelegate{ + conn: conn, + socketTimeout: time.Second * time.Duration(socketTimeout), + finalTimeout: time.Second * time.Duration(finalTimeout), + } +} + +func (delegate *connDelegate) Read(b []byte) (n int, err error) { + setReadDeadlineErr := delegate.SetReadDeadline(time.Now().Add(delegate.socketTimeout)) + flag := isDebugLogEnabled() + + if setReadDeadlineErr != nil && flag { + doLog(LEVEL_DEBUG, "Failed to set read deadline with reason: %v, but it's ok", setReadDeadlineErr) + } + + n, err = delegate.conn.Read(b) + setReadDeadlineErr = delegate.SetReadDeadline(time.Now().Add(delegate.finalTimeout)) + if setReadDeadlineErr != nil && flag { + doLog(LEVEL_DEBUG, "Failed to set read deadline with reason: %v, but it's ok", setReadDeadlineErr) + } + return n, err +} + +func (delegate *connDelegate) Write(b []byte) (n int, err error) { + setWriteDeadlineErr := delegate.SetWriteDeadline(time.Now().Add(delegate.socketTimeout)) + flag := isDebugLogEnabled() + if setWriteDeadlineErr != nil && flag { + doLog(LEVEL_DEBUG, "Failed to set write deadline with reason: %v, but it's ok", setWriteDeadlineErr) + } + + n, err = delegate.conn.Write(b) + finalTimeout := time.Now().Add(delegate.finalTimeout) + setWriteDeadlineErr = delegate.SetWriteDeadline(finalTimeout) + if setWriteDeadlineErr != nil && flag { + doLog(LEVEL_DEBUG, "Failed to set write deadline with reason: %v, but it's ok", setWriteDeadlineErr) + } + setReadDeadlineErr := delegate.SetReadDeadline(finalTimeout) + if setReadDeadlineErr != nil && flag { + doLog(LEVEL_DEBUG, "Failed to set read deadline with reason: %v, but it's ok", setReadDeadlineErr) + } + return n, err +} + +func (delegate *connDelegate) Close() error { + return delegate.conn.Close() +} + +func (delegate *connDelegate) LocalAddr() net.Addr { + return delegate.conn.LocalAddr() +} + +func (delegate *connDelegate) RemoteAddr() net.Addr { + return delegate.conn.RemoteAddr() +} + +func (delegate *connDelegate) SetDeadline(t time.Time) error { + return delegate.conn.SetDeadline(t) +} + +func (delegate *connDelegate) SetReadDeadline(t time.Time) error { + return delegate.conn.SetReadDeadline(t) +} + +func (delegate *connDelegate) SetWriteDeadline(t time.Time) error { + return delegate.conn.SetWriteDeadline(t) +} diff --git a/modules/obs/log.go b/modules/obs/log.go new file mode 100755 index 000000000..8938e5e40 --- /dev/null +++ b/modules/obs/log.go @@ -0,0 +1,317 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. 
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +//nolint:golint, unused +package obs + +import ( + "fmt" + "log" + "os" + "path/filepath" + "runtime" + "strings" + "sync" +) + +// Level defines the level of the log +type Level int + +const ( + LEVEL_OFF Level = 500 + LEVEL_ERROR Level = 400 + LEVEL_WARN Level = 300 + LEVEL_INFO Level = 200 + LEVEL_DEBUG Level = 100 +) + +var logLevelMap = map[Level]string{ + LEVEL_OFF: "[OFF]: ", + LEVEL_ERROR: "[ERROR]: ", + LEVEL_WARN: "[WARN]: ", + LEVEL_INFO: "[INFO]: ", + LEVEL_DEBUG: "[DEBUG]: ", +} + +type logConfType struct { + level Level + logToConsole bool + logFullPath string + maxLogSize int64 + backups int +} + +func getDefaultLogConf() logConfType { + return logConfType{ + level: LEVEL_WARN, + logToConsole: false, + logFullPath: "", + maxLogSize: 1024 * 1024 * 30, //30MB + backups: 10, + } +} + +var logConf logConfType + +type loggerWrapper struct { + fullPath string + fd *os.File + ch chan string + wg sync.WaitGroup + queue []string + logger *log.Logger + index int + cacheCount int + closed bool +} + +func (lw *loggerWrapper) doInit() { + lw.queue = make([]string, 0, lw.cacheCount) + lw.logger = log.New(lw.fd, "", 0) + lw.ch = make(chan string, lw.cacheCount) + lw.wg.Add(1) + go lw.doWrite() +} + +func (lw *loggerWrapper) rotate() { + stat, err := lw.fd.Stat() + if err != nil { + _err := lw.fd.Close() + if _err != nil { + doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err) + } + panic(err) + } + if stat.Size() >= logConf.maxLogSize { + _err := lw.fd.Sync() + if _err != nil { + panic(err) + } + _err = lw.fd.Close() + if _err != nil { + doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err) + } + if lw.index > logConf.backups { + lw.index = 1 + } + _err = os.Rename(lw.fullPath, lw.fullPath+"."+IntToString(lw.index)) + if _err != nil { + panic(err) + } + lw.index++ + + fd, err := os.OpenFile(lw.fullPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600) + if err != nil { + panic(err) + } + lw.fd = fd + lw.logger.SetOutput(lw.fd) + } +} + +func (lw *loggerWrapper) doFlush() { + lw.rotate() + for _, m := range lw.queue { + lw.logger.Println(m) + } + err := lw.fd.Sync() + if err != nil { + panic(err) + } +} + +func (lw *loggerWrapper) doClose() { + lw.closed = true + close(lw.ch) + lw.wg.Wait() +} + +func (lw *loggerWrapper) doWrite() { + defer lw.wg.Done() + for { + msg, ok := <-lw.ch + if !ok { + lw.doFlush() + _err := lw.fd.Close() + if _err != nil { + doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err) + } + break + } + if len(lw.queue) >= lw.cacheCount { + lw.doFlush() + lw.queue = make([]string, 0, lw.cacheCount) + } + lw.queue = append(lw.queue, msg) + } + +} + +func (lw *loggerWrapper) Printf(format string, v ...interface{}) { + if !lw.closed { + msg := fmt.Sprintf(format, v...) 
+ lw.ch <- msg + } +} + +var consoleLogger *log.Logger +var fileLogger *loggerWrapper +var lock = new(sync.RWMutex) + +func isDebugLogEnabled() bool { + return logConf.level <= LEVEL_DEBUG +} + +func isErrorLogEnabled() bool { + return logConf.level <= LEVEL_ERROR +} + +func isWarnLogEnabled() bool { + return logConf.level <= LEVEL_WARN +} + +func isInfoLogEnabled() bool { + return logConf.level <= LEVEL_INFO +} + +func reset() { + if fileLogger != nil { + fileLogger.doClose() + fileLogger = nil + } + consoleLogger = nil + logConf = getDefaultLogConf() +} + +// InitLog enable logging function with default cacheCnt +func InitLog(logFullPath string, maxLogSize int64, backups int, level Level, logToConsole bool) error { + return InitLogWithCacheCnt(logFullPath, maxLogSize, backups, level, logToConsole, 50) +} + +// InitLogWithCacheCnt enable logging function +func InitLogWithCacheCnt(logFullPath string, maxLogSize int64, backups int, level Level, logToConsole bool, cacheCnt int) error { + lock.Lock() + defer lock.Unlock() + if cacheCnt <= 0 { + cacheCnt = 50 + } + reset() + if fullPath := strings.TrimSpace(logFullPath); fullPath != "" { + _fullPath, err := filepath.Abs(fullPath) + if err != nil { + return err + } + + if !strings.HasSuffix(_fullPath, ".log") { + _fullPath += ".log" + } + + stat, err := os.Stat(_fullPath) + if err == nil && stat.IsDir() { + return fmt.Errorf("logFullPath:[%s] is a directory", _fullPath) + } else if err = os.MkdirAll(filepath.Dir(_fullPath), os.ModePerm); err != nil { + return err + } + + fd, err := os.OpenFile(_fullPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600) + if err != nil { + return err + } + + if stat == nil { + stat, err = os.Stat(_fullPath) + if err != nil { + _err := fd.Close() + if _err != nil { + doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err) + } + return err + } + } + + prefix := stat.Name() + "." + index := 1 + var timeIndex int64 = 0 + walkFunc := func(path string, info os.FileInfo, err error) error { + if err == nil { + if name := info.Name(); strings.HasPrefix(name, prefix) { + if i := StringToInt(name[len(prefix):], 0); i >= index && info.ModTime().Unix() >= timeIndex { + timeIndex = info.ModTime().Unix() + index = i + 1 + } + } + } + return err + } + + if err = filepath.Walk(filepath.Dir(_fullPath), walkFunc); err != nil { + _err := fd.Close() + if _err != nil { + doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err) + } + return err + } + + fileLogger = &loggerWrapper{fullPath: _fullPath, fd: fd, index: index, cacheCount: cacheCnt, closed: false} + fileLogger.doInit() + } + if maxLogSize > 0 { + logConf.maxLogSize = maxLogSize + } + if backups > 0 { + logConf.backups = backups + } + logConf.level = level + if logToConsole { + consoleLogger = log.New(os.Stdout, "", log.LstdFlags) + } + return nil +} + +// CloseLog disable logging and synchronize cache data to log files +func CloseLog() { + if logEnabled() { + lock.Lock() + defer lock.Unlock() + reset() + } +} + +func logEnabled() bool { + return consoleLogger != nil || fileLogger != nil +} + +// DoLog writes log messages to the logger +func DoLog(level Level, format string, v ...interface{}) { + doLog(level, format, v...) +} + +func doLog(level Level, format string, v ...interface{}) { + if logEnabled() && logConf.level <= level { + msg := fmt.Sprintf(format, v...) 
+ if _, file, line, ok := runtime.Caller(1); ok { + index := strings.LastIndex(file, "/") + if index >= 0 { + file = file[index+1:] + } + msg = fmt.Sprintf("%s:%d|%s", file, line, msg) + } + prefix := logLevelMap[level] + if consoleLogger != nil { + consoleLogger.Printf("%s%s", prefix, msg) + } + if fileLogger != nil { + nowDate := FormatUtcNow("2006-01-02T15:04:05Z") + fileLogger.Printf("%s %s%s", nowDate, prefix, msg) + } + } +} diff --git a/modules/obs/model.go b/modules/obs/model.go new file mode 100755 index 000000000..8752b5198 --- /dev/null +++ b/modules/obs/model.go @@ -0,0 +1,1236 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +//nolint:golint, unused +package obs + +import ( + "encoding/xml" + "io" + "net/http" + "time" +) + +// BaseModel defines base model response from OBS +type BaseModel struct { + StatusCode int `xml:"-"` + RequestId string `xml:"RequestId" json:"request_id"` + ResponseHeaders map[string][]string `xml:"-"` +} + +// Bucket defines bucket properties +type Bucket struct { + XMLName xml.Name `xml:"Bucket"` + Name string `xml:"Name"` + CreationDate time.Time `xml:"CreationDate"` + Location string `xml:"Location"` +} + +// Owner defines owner properties +type Owner struct { + XMLName xml.Name `xml:"Owner"` + ID string `xml:"ID"` + DisplayName string `xml:"DisplayName,omitempty"` +} + +// Initiator defines initiator properties +type Initiator struct { + XMLName xml.Name `xml:"Initiator"` + ID string `xml:"ID"` + DisplayName string `xml:"DisplayName,omitempty"` +} + +// ListBucketsInput is the input parameter of ListBuckets function +type ListBucketsInput struct { + QueryLocation bool +} + +// ListBucketsOutput is the result of ListBuckets function +type ListBucketsOutput struct { + BaseModel + XMLName xml.Name `xml:"ListAllMyBucketsResult"` + Owner Owner `xml:"Owner"` + Buckets []Bucket `xml:"Buckets>Bucket"` +} + +type bucketLocationObs struct { + XMLName xml.Name `xml:"Location"` + Location string `xml:",chardata"` +} + +// BucketLocation defines bucket location configuration +type BucketLocation struct { + XMLName xml.Name `xml:"CreateBucketConfiguration"` + Location string `xml:"LocationConstraint,omitempty"` +} + +// CreateBucketInput is the input parameter of CreateBucket function +type CreateBucketInput struct { + BucketLocation + Bucket string `xml:"-"` + ACL AclType `xml:"-"` + StorageClass StorageClassType `xml:"-"` + GrantReadId string `xml:"-"` + GrantWriteId string `xml:"-"` + GrantReadAcpId string `xml:"-"` + GrantWriteAcpId string `xml:"-"` + GrantFullControlId string `xml:"-"` + GrantReadDeliveredId string `xml:"-"` + GrantFullControlDeliveredId string `xml:"-"` + Epid string `xml:"-"` + AvailableZone string `xml:"-"` +} + +// BucketStoragePolicy defines the bucket storage class +type BucketStoragePolicy struct { + XMLName xml.Name `xml:"StoragePolicy"` + StorageClass StorageClassType `xml:"DefaultStorageClass"` +} + +// SetBucketStoragePolicyInput is the input parameter of SetBucketStoragePolicy 
function +type SetBucketStoragePolicyInput struct { + Bucket string `xml:"-"` + BucketStoragePolicy +} + +type getBucketStoragePolicyOutputS3 struct { + BaseModel + BucketStoragePolicy +} + +// GetBucketStoragePolicyOutput is the result of GetBucketStoragePolicy function +type GetBucketStoragePolicyOutput struct { + BaseModel + StorageClass string +} + +type bucketStoragePolicyObs struct { + XMLName xml.Name `xml:"StorageClass"` + StorageClass string `xml:",chardata"` +} +type getBucketStoragePolicyOutputObs struct { + BaseModel + bucketStoragePolicyObs +} + +// ListObjsInput defines parameters for listing objects +type ListObjsInput struct { + Prefix string + MaxKeys int + Delimiter string + Origin string + RequestHeader string +} + +// ListObjectsInput is the input parameter of ListObjects function +type ListObjectsInput struct { + ListObjsInput + Bucket string + Marker string +} + +// Content defines the object content properties +type Content struct { + XMLName xml.Name `xml:"Contents"` + Owner Owner `xml:"Owner"` + ETag string `xml:"ETag"` + Key string `xml:"Key"` + LastModified time.Time `xml:"LastModified"` + Size int64 `xml:"Size"` + StorageClass StorageClassType `xml:"StorageClass"` +} + +// ListObjectsOutput is the result of ListObjects function +type ListObjectsOutput struct { + BaseModel + XMLName xml.Name `xml:"ListBucketResult"` + Delimiter string `xml:"Delimiter"` + IsTruncated bool `xml:"IsTruncated"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker"` + MaxKeys int `xml:"MaxKeys"` + Name string `xml:"Name"` + Prefix string `xml:"Prefix"` + Contents []Content `xml:"Contents"` + CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` + Location string `xml:"-"` +} + +// ListVersionsInput is the input parameter of ListVersions function +type ListVersionsInput struct { + ListObjsInput + Bucket string + KeyMarker string + VersionIdMarker string +} + +// Version defines the properties of versioning objects +type Version struct { + DeleteMarker + XMLName xml.Name `xml:"Version"` + ETag string `xml:"ETag"` + Size int64 `xml:"Size"` +} + +// DeleteMarker defines the properties of versioning delete markers +type DeleteMarker struct { + XMLName xml.Name `xml:"DeleteMarker"` + Key string `xml:"Key"` + VersionId string `xml:"VersionId"` + IsLatest bool `xml:"IsLatest"` + LastModified time.Time `xml:"LastModified"` + Owner Owner `xml:"Owner"` + StorageClass StorageClassType `xml:"StorageClass"` +} + +// ListVersionsOutput is the result of ListVersions function +type ListVersionsOutput struct { + BaseModel + XMLName xml.Name `xml:"ListVersionsResult"` + Delimiter string `xml:"Delimiter"` + IsTruncated bool `xml:"IsTruncated"` + KeyMarker string `xml:"KeyMarker"` + NextKeyMarker string `xml:"NextKeyMarker"` + VersionIdMarker string `xml:"VersionIdMarker"` + NextVersionIdMarker string `xml:"NextVersionIdMarker"` + MaxKeys int `xml:"MaxKeys"` + Name string `xml:"Name"` + Prefix string `xml:"Prefix"` + Versions []Version `xml:"Version"` + DeleteMarkers []DeleteMarker `xml:"DeleteMarker"` + CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` + Location string `xml:"-"` +} + +// ListMultipartUploadsInput is the input parameter of ListMultipartUploads function +type ListMultipartUploadsInput struct { + Bucket string + Prefix string + MaxUploads int + Delimiter string + KeyMarker string + UploadIdMarker string +} + +// Upload defines multipart upload properties +type Upload struct { + XMLName xml.Name `xml:"Upload"` + Key string `xml:"Key"` + UploadId string `xml:"UploadId"` + 
Initiated time.Time `xml:"Initiated"` + StorageClass StorageClassType `xml:"StorageClass"` + Owner Owner `xml:"Owner"` + Initiator Initiator `xml:"Initiator"` +} + +// ListMultipartUploadsOutput is the result of ListMultipartUploads function +type ListMultipartUploadsOutput struct { + BaseModel + XMLName xml.Name `xml:"ListMultipartUploadsResult"` + Bucket string `xml:"Bucket"` + KeyMarker string `xml:"KeyMarker"` + NextKeyMarker string `xml:"NextKeyMarker"` + UploadIdMarker string `xml:"UploadIdMarker"` + NextUploadIdMarker string `xml:"NextUploadIdMarker"` + Delimiter string `xml:"Delimiter"` + IsTruncated bool `xml:"IsTruncated"` + MaxUploads int `xml:"MaxUploads"` + Prefix string `xml:"Prefix"` + Uploads []Upload `xml:"Upload"` + CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` +} + +// BucketQuota defines bucket quota configuration +type BucketQuota struct { + XMLName xml.Name `xml:"Quota"` + Quota int64 `xml:"StorageQuota"` +} + +// SetBucketQuotaInput is the input parameter of SetBucketQuota function +type SetBucketQuotaInput struct { + Bucket string `xml:"-"` + BucketQuota +} + +// GetBucketQuotaOutput is the result of GetBucketQuota function +type GetBucketQuotaOutput struct { + BaseModel + BucketQuota +} + +// GetBucketStorageInfoOutput is the result of GetBucketStorageInfo function +type GetBucketStorageInfoOutput struct { + BaseModel + XMLName xml.Name `xml:"GetBucketStorageInfoResult"` + Size int64 `xml:"Size"` + ObjectNumber int `xml:"ObjectNumber"` +} + +type getBucketLocationOutputS3 struct { + BaseModel + BucketLocation +} +type getBucketLocationOutputObs struct { + BaseModel + bucketLocationObs +} + +// GetBucketLocationOutput is the result of GetBucketLocation function +type GetBucketLocationOutput struct { + BaseModel + Location string `xml:"-"` +} + +// Grantee defines grantee properties +type Grantee struct { + XMLName xml.Name `xml:"Grantee"` + Type GranteeType `xml:"type,attr"` + ID string `xml:"ID,omitempty"` + DisplayName string `xml:"DisplayName,omitempty"` + URI GroupUriType `xml:"URI,omitempty"` +} + +type granteeObs struct { + XMLName xml.Name `xml:"Grantee"` + Type GranteeType `xml:"type,attr"` + ID string `xml:"ID,omitempty"` + DisplayName string `xml:"DisplayName,omitempty"` + Canned string `xml:"Canned,omitempty"` +} + +// Grant defines grant properties +type Grant struct { + XMLName xml.Name `xml:"Grant"` + Grantee Grantee `xml:"Grantee"` + Permission PermissionType `xml:"Permission"` + Delivered bool `xml:"Delivered"` +} +type grantObs struct { + XMLName xml.Name `xml:"Grant"` + Grantee granteeObs `xml:"Grantee"` + Permission PermissionType `xml:"Permission"` + Delivered bool `xml:"Delivered"` +} + +// AccessControlPolicy defines access control policy properties +type AccessControlPolicy struct { + XMLName xml.Name `xml:"AccessControlPolicy"` + Owner Owner `xml:"Owner"` + Grants []Grant `xml:"AccessControlList>Grant"` + Delivered string `xml:"Delivered,omitempty"` +} + +type accessControlPolicyObs struct { + XMLName xml.Name `xml:"AccessControlPolicy"` + Owner Owner `xml:"Owner"` + Grants []grantObs `xml:"AccessControlList>Grant"` +} + +// GetBucketAclOutput is the result of GetBucketAcl function +type GetBucketAclOutput struct { + BaseModel + AccessControlPolicy +} + +type getBucketACLOutputObs struct { + BaseModel + accessControlPolicyObs +} + +// SetBucketAclInput is the input parameter of SetBucketAcl function +type SetBucketAclInput struct { + Bucket string `xml:"-"` + ACL AclType `xml:"-"` + AccessControlPolicy +} + +// SetBucketPolicyInput is 
the input parameter of SetBucketPolicy function +type SetBucketPolicyInput struct { + Bucket string + Policy string +} + +// GetBucketPolicyOutput is the result of GetBucketPolicy function +type GetBucketPolicyOutput struct { + BaseModel + Policy string `json:"body"` +} + +// CorsRule defines the CORS rules +type CorsRule struct { + XMLName xml.Name `xml:"CORSRule"` + ID string `xml:"ID,omitempty"` + AllowedOrigin []string `xml:"AllowedOrigin"` + AllowedMethod []string `xml:"AllowedMethod"` + AllowedHeader []string `xml:"AllowedHeader,omitempty"` + MaxAgeSeconds int `xml:"MaxAgeSeconds"` + ExposeHeader []string `xml:"ExposeHeader,omitempty"` +} + +// BucketCors defines the bucket CORS configuration +type BucketCors struct { + XMLName xml.Name `xml:"CORSConfiguration"` + CorsRules []CorsRule `xml:"CORSRule"` +} + +// SetBucketCorsInput is the input parameter of SetBucketCors function +type SetBucketCorsInput struct { + Bucket string `xml:"-"` + BucketCors +} + +// GetBucketCorsOutput is the result of GetBucketCors function +type GetBucketCorsOutput struct { + BaseModel + BucketCors +} + +// BucketVersioningConfiguration defines the versioning configuration +type BucketVersioningConfiguration struct { + XMLName xml.Name `xml:"VersioningConfiguration"` + Status VersioningStatusType `xml:"Status"` +} + +// SetBucketVersioningInput is the input parameter of SetBucketVersioning function +type SetBucketVersioningInput struct { + Bucket string `xml:"-"` + BucketVersioningConfiguration +} + +// GetBucketVersioningOutput is the result of GetBucketVersioning function +type GetBucketVersioningOutput struct { + BaseModel + BucketVersioningConfiguration +} + +// IndexDocument defines the default page configuration +type IndexDocument struct { + Suffix string `xml:"Suffix"` +} + +// ErrorDocument defines the error page configuration +type ErrorDocument struct { + Key string `xml:"Key,omitempty"` +} + +// Condition defines condition in RoutingRule +type Condition struct { + XMLName xml.Name `xml:"Condition"` + KeyPrefixEquals string `xml:"KeyPrefixEquals,omitempty"` + HttpErrorCodeReturnedEquals string `xml:"HttpErrorCodeReturnedEquals,omitempty"` +} + +// Redirect defines redirect in RoutingRule +type Redirect struct { + XMLName xml.Name `xml:"Redirect"` + Protocol ProtocolType `xml:"Protocol,omitempty"` + HostName string `xml:"HostName,omitempty"` + ReplaceKeyPrefixWith string `xml:"ReplaceKeyPrefixWith,omitempty"` + ReplaceKeyWith string `xml:"ReplaceKeyWith,omitempty"` + HttpRedirectCode string `xml:"HttpRedirectCode,omitempty"` +} + +// RoutingRule defines routing rules +type RoutingRule struct { + XMLName xml.Name `xml:"RoutingRule"` + Condition Condition `xml:"Condition,omitempty"` + Redirect Redirect `xml:"Redirect"` +} + +// RedirectAllRequestsTo defines redirect in BucketWebsiteConfiguration +type RedirectAllRequestsTo struct { + XMLName xml.Name `xml:"RedirectAllRequestsTo"` + Protocol ProtocolType `xml:"Protocol,omitempty"` + HostName string `xml:"HostName"` +} + +// BucketWebsiteConfiguration defines the bucket website configuration +type BucketWebsiteConfiguration struct { + XMLName xml.Name `xml:"WebsiteConfiguration"` + RedirectAllRequestsTo RedirectAllRequestsTo `xml:"RedirectAllRequestsTo,omitempty"` + IndexDocument IndexDocument `xml:"IndexDocument,omitempty"` + ErrorDocument ErrorDocument `xml:"ErrorDocument,omitempty"` + RoutingRules []RoutingRule `xml:"RoutingRules>RoutingRule,omitempty"` +} + +// SetBucketWebsiteConfigurationInput is the input parameter of 
SetBucketWebsiteConfiguration function
+type SetBucketWebsiteConfigurationInput struct {
+	Bucket string `xml:"-"`
+	BucketWebsiteConfiguration
+}
+
+// GetBucketWebsiteConfigurationOutput is the result of GetBucketWebsiteConfiguration function
+type GetBucketWebsiteConfigurationOutput struct {
+	BaseModel
+	BucketWebsiteConfiguration
+}
+
+// GetBucketMetadataInput is the input parameter of GetBucketMetadata function
+type GetBucketMetadataInput struct {
+	Bucket        string
+	Origin        string
+	RequestHeader string
+}
+
+// SetObjectMetadataInput is the input parameter of SetObjectMetadata function
+type SetObjectMetadataInput struct {
+	Bucket                  string
+	Key                     string
+	VersionId               string
+	MetadataDirective       MetadataDirectiveType
+	CacheControl            string
+	ContentDisposition      string
+	ContentEncoding         string
+	ContentLanguage         string
+	ContentType             string
+	Expires                 string
+	WebsiteRedirectLocation string
+	StorageClass            StorageClassType
+	Metadata                map[string]string
+}
+
+// SetObjectMetadataOutput is the result of SetObjectMetadata function
+type SetObjectMetadataOutput struct {
+	BaseModel
+	MetadataDirective       MetadataDirectiveType
+	CacheControl            string
+	ContentDisposition      string
+	ContentEncoding         string
+	ContentLanguage         string
+	ContentType             string
+	Expires                 string
+	WebsiteRedirectLocation string
+	StorageClass            StorageClassType
+	Metadata                map[string]string
+}
+
+// GetBucketMetadataOutput is the result of GetBucketMetadata function
+type GetBucketMetadataOutput struct {
+	BaseModel
+	StorageClass  StorageClassType
+	Location      string
+	Version       string
+	AllowOrigin   string
+	AllowMethod   string
+	AllowHeader   string
+	MaxAgeSeconds int
+	ExposeHeader  string
+	Epid          string
+}
+
+// BucketLoggingStatus defines the bucket logging configuration
+type BucketLoggingStatus struct {
+	XMLName      xml.Name `xml:"BucketLoggingStatus"`
+	Agency       string   `xml:"Agency,omitempty"`
+	TargetBucket string   `xml:"LoggingEnabled>TargetBucket,omitempty"`
+	TargetPrefix string   `xml:"LoggingEnabled>TargetPrefix,omitempty"`
+	TargetGrants []Grant  `xml:"LoggingEnabled>TargetGrants>Grant,omitempty"`
+}
+
+// SetBucketLoggingConfigurationInput is the input parameter of SetBucketLoggingConfiguration function
+type SetBucketLoggingConfigurationInput struct {
+	Bucket string `xml:"-"`
+	BucketLoggingStatus
+}
+
+// GetBucketLoggingConfigurationOutput is the result of GetBucketLoggingConfiguration function
+type GetBucketLoggingConfigurationOutput struct {
+	BaseModel
+	BucketLoggingStatus
+}
+
+// Transition defines transition property in LifecycleRule
+type Transition struct {
+	XMLName      xml.Name         `xml:"Transition"`
+	Date         time.Time        `xml:"Date,omitempty"`
+	Days         int              `xml:"Days,omitempty"`
+	StorageClass StorageClassType `xml:"StorageClass"`
+}
+
+// Expiration defines expiration property in LifecycleRule
+type Expiration struct {
+	XMLName xml.Name  `xml:"Expiration"`
+	Date    time.Time `xml:"Date,omitempty"`
+	Days    int       `xml:"Days,omitempty"`
+}
+
+// NoncurrentVersionTransition defines noncurrentVersion transition property in LifecycleRule
+type NoncurrentVersionTransition struct {
+	XMLName        xml.Name         `xml:"NoncurrentVersionTransition"`
+	NoncurrentDays int              `xml:"NoncurrentDays"`
+	StorageClass   StorageClassType `xml:"StorageClass"`
+}
+
+// NoncurrentVersionExpiration defines noncurrentVersion expiration property in LifecycleRule
+type NoncurrentVersionExpiration struct {
+	XMLName        xml.Name `xml:"NoncurrentVersionExpiration"`
+	NoncurrentDays int      `xml:"NoncurrentDays"`
+}
+
+// LifecycleRule defines lifecycle rule
+type LifecycleRule struct {
+	ID string 
`xml:"ID,omitempty"` + Prefix string `xml:"Prefix"` + Status RuleStatusType `xml:"Status"` + Transitions []Transition `xml:"Transition,omitempty"` + Expiration Expiration `xml:"Expiration,omitempty"` + NoncurrentVersionTransitions []NoncurrentVersionTransition `xml:"NoncurrentVersionTransition,omitempty"` + NoncurrentVersionExpiration NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty"` +} + +// BucketLifecyleConfiguration defines the bucket lifecycle configuration +type BucketLifecyleConfiguration struct { + XMLName xml.Name `xml:"LifecycleConfiguration"` + LifecycleRules []LifecycleRule `xml:"Rule"` +} + +// SetBucketLifecycleConfigurationInput is the input parameter of SetBucketLifecycleConfiguration function +type SetBucketLifecycleConfigurationInput struct { + Bucket string `xml:"-"` + BucketLifecyleConfiguration +} + +// GetBucketLifecycleConfigurationOutput is the result of GetBucketLifecycleConfiguration function +type GetBucketLifecycleConfigurationOutput struct { + BaseModel + BucketLifecyleConfiguration +} + +// Tag defines tag property in BucketTagging +type Tag struct { + XMLName xml.Name `xml:"Tag"` + Key string `xml:"Key"` + Value string `xml:"Value"` +} + +// BucketTagging defines the bucket tag configuration +type BucketTagging struct { + XMLName xml.Name `xml:"Tagging"` + Tags []Tag `xml:"TagSet>Tag"` +} + +// SetBucketTaggingInput is the input parameter of SetBucketTagging function +type SetBucketTaggingInput struct { + Bucket string `xml:"-"` + BucketTagging +} + +// GetBucketTaggingOutput is the result of GetBucketTagging function +type GetBucketTaggingOutput struct { + BaseModel + BucketTagging +} + +// FilterRule defines filter rule in TopicConfiguration +type FilterRule struct { + XMLName xml.Name `xml:"FilterRule"` + Name string `xml:"Name,omitempty"` + Value string `xml:"Value,omitempty"` +} + +// TopicConfiguration defines the topic configuration +type TopicConfiguration struct { + XMLName xml.Name `xml:"TopicConfiguration"` + ID string `xml:"Id,omitempty"` + Topic string `xml:"Topic"` + Events []EventType `xml:"Event"` + FilterRules []FilterRule `xml:"Filter>Object>FilterRule"` +} + +// BucketNotification defines the bucket notification configuration +type BucketNotification struct { + XMLName xml.Name `xml:"NotificationConfiguration"` + TopicConfigurations []TopicConfiguration `xml:"TopicConfiguration"` +} + +// SetBucketNotificationInput is the input parameter of SetBucketNotification function +type SetBucketNotificationInput struct { + Bucket string `xml:"-"` + BucketNotification +} + +type topicConfigurationS3 struct { + XMLName xml.Name `xml:"TopicConfiguration"` + ID string `xml:"Id,omitempty"` + Topic string `xml:"Topic"` + Events []string `xml:"Event"` + FilterRules []FilterRule `xml:"Filter>S3Key>FilterRule"` +} + +type bucketNotificationS3 struct { + XMLName xml.Name `xml:"NotificationConfiguration"` + TopicConfigurations []topicConfigurationS3 `xml:"TopicConfiguration"` +} + +type getBucketNotificationOutputS3 struct { + BaseModel + bucketNotificationS3 +} + +// GetBucketNotificationOutput is the result of GetBucketNotification function +type GetBucketNotificationOutput struct { + BaseModel + BucketNotification +} + +// DeleteObjectInput is the input parameter of DeleteObject function +type DeleteObjectInput struct { + Bucket string + Key string + VersionId string +} + +// DeleteObjectOutput is the result of DeleteObject function +type DeleteObjectOutput struct { + BaseModel + VersionId string + DeleteMarker bool +} + +// 
ObjectToDelete defines the object property in DeleteObjectsInput +type ObjectToDelete struct { + XMLName xml.Name `xml:"Object"` + Key string `xml:"Key"` + VersionId string `xml:"VersionId,omitempty"` +} + +// DeleteObjectsInput is the input parameter of DeleteObjects function +type DeleteObjectsInput struct { + Bucket string `xml:"-"` + XMLName xml.Name `xml:"Delete"` + Quiet bool `xml:"Quiet,omitempty"` + Objects []ObjectToDelete `xml:"Object"` +} + +// Deleted defines the deleted property in DeleteObjectsOutput +type Deleted struct { + XMLName xml.Name `xml:"Deleted"` + Key string `xml:"Key"` + VersionId string `xml:"VersionId"` + DeleteMarker bool `xml:"DeleteMarker"` + DeleteMarkerVersionId string `xml:"DeleteMarkerVersionId"` +} + +// Error defines the error property in DeleteObjectsOutput +type Error struct { + XMLName xml.Name `xml:"Error"` + Key string `xml:"Key"` + VersionId string `xml:"VersionId"` + Code string `xml:"Code"` + Message string `xml:"Message"` +} + +// DeleteObjectsOutput is the result of DeleteObjects function +type DeleteObjectsOutput struct { + BaseModel + XMLName xml.Name `xml:"DeleteResult"` + Deleteds []Deleted `xml:"Deleted"` + Errors []Error `xml:"Error"` +} + +// SetObjectAclInput is the input parameter of SetObjectAcl function +type SetObjectAclInput struct { + Bucket string `xml:"-"` + Key string `xml:"-"` + VersionId string `xml:"-"` + ACL AclType `xml:"-"` + AccessControlPolicy +} + +// GetObjectAclInput is the input parameter of GetObjectAcl function +type GetObjectAclInput struct { + Bucket string + Key string + VersionId string +} + +// GetObjectAclOutput is the result of GetObjectAcl function +type GetObjectAclOutput struct { + BaseModel + VersionId string + AccessControlPolicy +} + +// RestoreObjectInput is the input parameter of RestoreObject function +type RestoreObjectInput struct { + Bucket string `xml:"-"` + Key string `xml:"-"` + VersionId string `xml:"-"` + XMLName xml.Name `xml:"RestoreRequest"` + Days int `xml:"Days"` + Tier RestoreTierType `xml:"GlacierJobParameters>Tier,omitempty"` +} + +// ISseHeader defines the sse encryption header +type ISseHeader interface { + GetEncryption() string + GetKey() string +} + +// SseKmsHeader defines the SseKms header +type SseKmsHeader struct { + Encryption string + Key string + isObs bool +} + +// SseCHeader defines the SseC header +type SseCHeader struct { + Encryption string + Key string + KeyMD5 string +} + +// GetObjectMetadataInput is the input parameter of GetObjectMetadata function +type GetObjectMetadataInput struct { + Bucket string + Key string + VersionId string + Origin string + RequestHeader string + SseHeader ISseHeader +} + +// GetObjectMetadataOutput is the result of GetObjectMetadata function +type GetObjectMetadataOutput struct { + BaseModel + VersionId string + WebsiteRedirectLocation string + Expiration string + Restore string + ObjectType string + NextAppendPosition string + StorageClass StorageClassType + ContentLength int64 + ContentType string + ETag string + AllowOrigin string + AllowHeader string + AllowMethod string + ExposeHeader string + MaxAgeSeconds int + LastModified time.Time + SseHeader ISseHeader + Metadata map[string]string +} + +// GetObjectInput is the input parameter of GetObject function +type GetObjectInput struct { + GetObjectMetadataInput + IfMatch string + IfNoneMatch string + IfUnmodifiedSince time.Time + IfModifiedSince time.Time + RangeStart int64 + RangeEnd int64 + ImageProcess string + ResponseCacheControl string + ResponseContentDisposition string + 
ResponseContentEncoding string + ResponseContentLanguage string + ResponseContentType string + ResponseExpires string +} + +// GetObjectOutput is the result of GetObject function +type GetObjectOutput struct { + GetObjectMetadataOutput + DeleteMarker bool + CacheControl string + ContentDisposition string + ContentEncoding string + ContentLanguage string + Expires string + Body io.ReadCloser +} + +// ObjectOperationInput defines the object operation properties +type ObjectOperationInput struct { + Bucket string + Key string + ACL AclType + GrantReadId string + GrantReadAcpId string + GrantWriteAcpId string + GrantFullControlId string + StorageClass StorageClassType + WebsiteRedirectLocation string + Expires int64 + SseHeader ISseHeader + Metadata map[string]string +} + +// PutObjectBasicInput defines the basic object operation properties +type PutObjectBasicInput struct { + ObjectOperationInput + ContentType string + ContentMD5 string + ContentLength int64 +} + +// PutObjectInput is the input parameter of PutObject function +type PutObjectInput struct { + PutObjectBasicInput + Body io.Reader +} + +// PutFileInput is the input parameter of PutFile function +type PutFileInput struct { + PutObjectBasicInput + SourceFile string +} + +// PutObjectOutput is the result of PutObject function +type PutObjectOutput struct { + BaseModel + VersionId string + SseHeader ISseHeader + StorageClass StorageClassType + ETag string +} + +// CopyObjectInput is the input parameter of CopyObject function +type CopyObjectInput struct { + ObjectOperationInput + CopySourceBucket string + CopySourceKey string + CopySourceVersionId string + CopySourceIfMatch string + CopySourceIfNoneMatch string + CopySourceIfUnmodifiedSince time.Time + CopySourceIfModifiedSince time.Time + SourceSseHeader ISseHeader + CacheControl string + ContentDisposition string + ContentEncoding string + ContentLanguage string + ContentType string + Expires string + MetadataDirective MetadataDirectiveType + SuccessActionRedirect string +} + +// CopyObjectOutput is the result of CopyObject function +type CopyObjectOutput struct { + BaseModel + CopySourceVersionId string `xml:"-"` + VersionId string `xml:"-"` + SseHeader ISseHeader `xml:"-"` + XMLName xml.Name `xml:"CopyObjectResult"` + LastModified time.Time `xml:"LastModified"` + ETag string `xml:"ETag"` +} + +// AbortMultipartUploadInput is the input parameter of AbortMultipartUpload function +type AbortMultipartUploadInput struct { + Bucket string + Key string + UploadId string +} + +// InitiateMultipartUploadInput is the input parameter of InitiateMultipartUpload function +type InitiateMultipartUploadInput struct { + ObjectOperationInput + ContentType string +} + +// InitiateMultipartUploadOutput is the result of InitiateMultipartUpload function +type InitiateMultipartUploadOutput struct { + BaseModel + XMLName xml.Name `xml:"InitiateMultipartUploadResult"` + Bucket string `xml:"Bucket"` + Key string `xml:"Key"` + UploadId string `xml:"UploadId"` + SseHeader ISseHeader +} + +// UploadPartInput is the input parameter of UploadPart function +type UploadPartInput struct { + Bucket string + Key string + PartNumber int + UploadId string + ContentMD5 string + SseHeader ISseHeader + Body io.Reader + SourceFile string + Offset int64 + PartSize int64 +} + +// UploadPartOutput is the result of UploadPart function +type UploadPartOutput struct { + BaseModel + PartNumber int + ETag string + SseHeader ISseHeader +} + +// Part defines the part properties +type Part struct { + XMLName xml.Name `xml:"Part"` + 
PartNumber int `xml:"PartNumber"` + ETag string `xml:"ETag"` + LastModified time.Time `xml:"LastModified,omitempty"` + Size int64 `xml:"Size,omitempty"` +} + +// CompleteMultipartUploadInput is the input parameter of CompleteMultipartUpload function +type CompleteMultipartUploadInput struct { + Bucket string `xml:"-"` + Key string `xml:"-"` + UploadId string `xml:"-"` + XMLName xml.Name `xml:"CompleteMultipartUpload"` + Parts []Part `xml:"Part"` +} + +// CompleteMultipartUploadOutput is the result of CompleteMultipartUpload function +type CompleteMultipartUploadOutput struct { + BaseModel + VersionId string `xml:"-"` + SseHeader ISseHeader `xml:"-"` + XMLName xml.Name `xml:"CompleteMultipartUploadResult"` + Location string `xml:"Location"` + Bucket string `xml:"Bucket"` + Key string `xml:"Key"` + ETag string `xml:"ETag"` +} + +// ListPartsInput is the input parameter of ListParts function +type ListPartsInput struct { + Bucket string + Key string + UploadId string + MaxParts int + PartNumberMarker int +} + +// ListPartsOutput is the result of ListParts function +type ListPartsOutput struct { + BaseModel + XMLName xml.Name `xml:"ListPartsResult"` + Bucket string `xml:"Bucket"` + Key string `xml:"Key"` + UploadId string `xml:"UploadId"` + PartNumberMarker int `xml:"PartNumberMarker"` + NextPartNumberMarker int `xml:"NextPartNumberMarker"` + MaxParts int `xml:"MaxParts"` + IsTruncated bool `xml:"IsTruncated"` + StorageClass StorageClassType `xml:"StorageClass"` + Initiator Initiator `xml:"Initiator"` + Owner Owner `xml:"Owner"` + Parts []Part `xml:"Part"` +} + +// CopyPartInput is the input parameter of CopyPart function +type CopyPartInput struct { + Bucket string + Key string + UploadId string + PartNumber int + CopySourceBucket string + CopySourceKey string + CopySourceVersionId string + CopySourceRangeStart int64 + CopySourceRangeEnd int64 + SseHeader ISseHeader + SourceSseHeader ISseHeader +} + +// CopyPartOutput is the result of CopyPart function +type CopyPartOutput struct { + BaseModel + XMLName xml.Name `xml:"CopyPartResult"` + PartNumber int `xml:"-"` + ETag string `xml:"ETag"` + LastModified time.Time `xml:"LastModified"` + SseHeader ISseHeader `xml:"-"` +} + +// CreateSignedUrlInput is the input parameter of CreateSignedUrl function +type CreateSignedUrlInput struct { + Method HttpMethodType + Bucket string + Key string + SubResource SubResourceType + Expires int + Headers map[string]string + QueryParams map[string]string +} + +// CreateSignedUrlOutput is the result of CreateSignedUrl function +type CreateSignedUrlOutput struct { + SignedUrl string + ActualSignedRequestHeaders http.Header +} + +// CreateBrowserBasedSignatureInput is the input parameter of CreateBrowserBasedSignature function. +type CreateBrowserBasedSignatureInput struct { + Bucket string + Key string + Expires int + FormParams map[string]string +} + +// CreateBrowserBasedSignatureOutput is the result of CreateBrowserBasedSignature function. 
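+//
+// A minimal usage sketch (hedged: the bucket name, key, and expiry below are
+// illustrative only, and "obsClient" is assumed to be an already-initialized
+// ObsClient):
+//
+//	input := &CreateBrowserBasedSignatureInput{
+//		Bucket:  "example-bucket",
+//		Key:     "example/object.txt",
+//		Expires: 300,
+//	}
+//	output, err := obsClient.CreateBrowserBasedSignature(input)
+//	if err == nil {
+//		// Post output.Policy and output.Signature as form fields from the browser.
+//	}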
+type CreateBrowserBasedSignatureOutput struct { + OriginPolicy string + Policy string + Algorithm string + Credential string + Date string + Signature string +} + +// HeadObjectInput is the input parameter of HeadObject function +type HeadObjectInput struct { + Bucket string + Key string + VersionId string +} + +// BucketPayer defines the request payment configuration +type BucketPayer struct { + XMLName xml.Name `xml:"RequestPaymentConfiguration"` + Payer PayerType `xml:"Payer"` +} + +// SetBucketRequestPaymentInput is the input parameter of SetBucketRequestPayment function +type SetBucketRequestPaymentInput struct { + Bucket string `xml:"-"` + BucketPayer +} + +// GetBucketRequestPaymentOutput is the result of GetBucketRequestPayment function +type GetBucketRequestPaymentOutput struct { + BaseModel + BucketPayer +} + +// UploadFileInput is the input parameter of UploadFile function +type UploadFileInput struct { + ObjectOperationInput + ContentType string + UploadFile string + PartSize int64 + TaskNum int + EnableCheckpoint bool + CheckpointFile string +} + +// DownloadFileInput is the input parameter of DownloadFile function +type DownloadFileInput struct { + GetObjectMetadataInput + IfMatch string + IfNoneMatch string + IfModifiedSince time.Time + IfUnmodifiedSince time.Time + DownloadFile string + PartSize int64 + TaskNum int + EnableCheckpoint bool + CheckpointFile string +} + +// SetBucketFetchPolicyInput is the input parameter of SetBucketFetchPolicy function +type SetBucketFetchPolicyInput struct { + Bucket string + Status FetchPolicyStatusType `json:"status"` + Agency string `json:"agency"` +} + +// GetBucketFetchPolicyInput is the input parameter of GetBucketFetchPolicy function +type GetBucketFetchPolicyInput struct { + Bucket string +} + +// GetBucketFetchPolicyOutput is the result of GetBucketFetchPolicy function +type GetBucketFetchPolicyOutput struct { + BaseModel + FetchResponse `json:"fetch"` +} + +// FetchResponse defines the response fetch policy configuration +type FetchResponse struct { + Status FetchPolicyStatusType `json:"status"` + Agency string `json:"agency"` +} + +// DeleteBucketFetchPolicyInput is the input parameter of DeleteBucketFetchPolicy function +type DeleteBucketFetchPolicyInput struct { + Bucket string +} + +// SetBucketFetchJobInput is the input parameter of SetBucketFetchJob function +type SetBucketFetchJobInput struct { + Bucket string `json:"bucket"` + URL string `json:"url"` + Host string `json:"host,omitempty"` + Key string `json:"key,omitempty"` + Md5 string `json:"md5,omitempty"` + CallBackURL string `json:"callbackurl,omitempty"` + CallBackBody string `json:"callbackbody,omitempty"` + CallBackBodyType string `json:"callbackbodytype,omitempty"` + CallBackHost string `json:"callbackhost,omitempty"` + FileType string `json:"file_type,omitempty"` + IgnoreSameKey bool `json:"ignore_same_key,omitempty"` + ObjectHeaders map[string]string `json:"objectheaders,omitempty"` + Etag string `json:"etag,omitempty"` + TrustName string `json:"trustname,omitempty"` +} + +// SetBucketFetchJobOutput is the result of SetBucketFetchJob function +type SetBucketFetchJobOutput struct { + BaseModel + SetBucketFetchJobResponse +} + +// SetBucketFetchJobResponse defines the response SetBucketFetchJob configuration +type SetBucketFetchJobResponse struct { + ID string `json:"id"` + Wait int `json:"Wait"` +} + +// GetBucketFetchJobInput is the input parameter of GetBucketFetchJob function +type GetBucketFetchJobInput struct { + Bucket string + JobID string +} + +// 
GetBucketFetchJobOutput is the result of GetBucketFetchJob function +type GetBucketFetchJobOutput struct { + BaseModel + GetBucketFetchJobResponse +} + +// GetBucketFetchJobResponse defines the response fetch job configuration +type GetBucketFetchJobResponse struct { + Err string `json:"err"` + Code string `json:"code"` + Status string `json:"status"` + Job JobResponse `json:"job"` +} + +// JobResponse defines the response job configuration +type JobResponse struct { + Bucket string `json:"bucket"` + URL string `json:"url"` + Host string `json:"host"` + Key string `json:"key"` + Md5 string `json:"md5"` + CallBackURL string `json:"callbackurl"` + CallBackBody string `json:"callbackbody"` + CallBackBodyType string `json:"callbackbodytype"` + CallBackHost string `json:"callbackhost"` + FileType string `json:"file_type"` + IgnoreSameKey bool `json:"ignore_same_key"` +} diff --git a/modules/obs/pool.go b/modules/obs/pool.go new file mode 100755 index 000000000..4596f0a16 --- /dev/null +++ b/modules/obs/pool.go @@ -0,0 +1,543 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +//nolint:structcheck, unused +//nolint:golint, unused +package obs + +import ( + "errors" + "fmt" + "runtime" + "sync" + "sync/atomic" + "time" +) + +// Future defines interface with function: Get +type Future interface { + Get() interface{} +} + +// FutureResult for task result +type FutureResult struct { + result interface{} + resultChan chan interface{} + lock sync.Mutex +} + +type panicResult struct { + presult interface{} +} + +func (f *FutureResult) checkPanic() interface{} { + if r, ok := f.result.(panicResult); ok { + panic(r.presult) + } + return f.result +} + +// Get gets the task result +func (f *FutureResult) Get() interface{} { + if f.resultChan == nil { + return f.checkPanic() + } + f.lock.Lock() + defer f.lock.Unlock() + if f.resultChan == nil { + return f.checkPanic() + } + + f.result = <-f.resultChan + close(f.resultChan) + f.resultChan = nil + return f.checkPanic() +} + +// Task defines interface with function: Run +type Task interface { + Run() interface{} +} + +type funcWrapper struct { + f func() interface{} +} + +func (fw *funcWrapper) Run() interface{} { + if fw.f != nil { + return fw.f() + } + return nil +} + +type taskWrapper struct { + t Task + f *FutureResult +} + +func (tw *taskWrapper) Run() interface{} { + if tw.t != nil { + return tw.t.Run() + } + return nil +} + +type signalTask struct { + id string +} + +func (signalTask) Run() interface{} { + return nil +} + +type worker struct { + name string + taskQueue chan Task + wg *sync.WaitGroup + pool *RoutinePool +} + +func runTask(t Task) { + if tw, ok := t.(*taskWrapper); ok { + defer func() { + if r := recover(); r != nil { + tw.f.resultChan <- panicResult{ + presult: r, + } + } + }() + ret := t.Run() + tw.f.resultChan <- ret + } else { + t.Run() + } +} + +func (*worker) runTask(t Task) { + runTask(t) +} + +func (w *worker) start() { + go func() { + defer func() { + if w.wg != nil { + 
w.wg.Done()
+			}
+		}()
+		for {
+			task, ok := <-w.taskQueue
+			if !ok {
+				break
+			}
+			w.pool.AddCurrentWorkingCnt(1)
+			w.runTask(task)
+			w.pool.AddCurrentWorkingCnt(-1)
+			if w.pool.autoTuneWorker(w) {
+				break
+			}
+		}
+	}()
+}
+
+func (w *worker) release() {
+	w.taskQueue = nil
+	w.wg = nil
+	w.pool = nil
+}
+
+// Pool defines the goroutine pool interface
+type Pool interface {
+	ShutDown()
+	Submit(t Task) (Future, error)
+	SubmitFunc(f func() interface{}) (Future, error)
+	Execute(t Task)
+	ExecuteFunc(f func() interface{})
+	GetMaxWorkerCnt() int64
+	AddMaxWorkerCnt(value int64) int64
+	GetCurrentWorkingCnt() int64
+	AddCurrentWorkingCnt(value int64) int64
+	GetWorkerCnt() int64
+	AddWorkerCnt(value int64) int64
+	EnableAutoTune()
+}
+
+type basicPool struct {
+	maxWorkerCnt      int64
+	workerCnt         int64
+	currentWorkingCnt int64
+	isShutDown        int32
+}
+
+// ErrTaskInvalid will be returned if the task is nil
+var ErrTaskInvalid = errors.New("Task is nil")
+
+func (pool *basicPool) GetCurrentWorkingCnt() int64 {
+	return atomic.LoadInt64(&pool.currentWorkingCnt)
+}
+
+func (pool *basicPool) AddCurrentWorkingCnt(value int64) int64 {
+	return atomic.AddInt64(&pool.currentWorkingCnt, value)
+}
+
+func (pool *basicPool) GetWorkerCnt() int64 {
+	return atomic.LoadInt64(&pool.workerCnt)
+}
+
+func (pool *basicPool) AddWorkerCnt(value int64) int64 {
+	return atomic.AddInt64(&pool.workerCnt, value)
+}
+
+func (pool *basicPool) GetMaxWorkerCnt() int64 {
+	return atomic.LoadInt64(&pool.maxWorkerCnt)
+}
+
+func (pool *basicPool) AddMaxWorkerCnt(value int64) int64 {
+	return atomic.AddInt64(&pool.maxWorkerCnt, value)
+}
+
+func (pool *basicPool) CompareAndSwapCurrentWorkingCnt(oldValue, newValue int64) bool {
+	return atomic.CompareAndSwapInt64(&pool.currentWorkingCnt, oldValue, newValue)
+}
+
+func (pool *basicPool) EnableAutoTune() {
+
+}
+
+// RoutinePool defines the goroutine pool struct
+type RoutinePool struct {
+	basicPool
+	taskQueue     chan Task
+	dispatchQueue chan Task
+	workers       map[string]*worker
+	cacheCnt      int
+	wg            *sync.WaitGroup
+	lock          *sync.Mutex
+	shutDownWg    *sync.WaitGroup
+	autoTune      int32
+}
+
+// ErrSubmitTimeout will be returned if the task cannot be submitted in time when calling the SubmitWithTimeout function
+var ErrSubmitTimeout = errors.New("Submit task timeout")
+
+// ErrPoolShutDown will be returned if the RoutinePool is shut down
+var ErrPoolShutDown = errors.New("RoutinePool is shutdown")
+
+// ErrTaskReject will be returned if the submitted task is rejected
+var ErrTaskReject = errors.New("Submit task is rejected")
+
+var closeQueue = signalTask{id: "closeQueue"}
+
+// NewRoutinePool creates a RoutinePool instance
+func NewRoutinePool(maxWorkerCnt, cacheCnt int) Pool {
+	if maxWorkerCnt <= 0 {
+		maxWorkerCnt = runtime.NumCPU()
+	}
+
+	pool := &RoutinePool{
+		cacheCnt:   cacheCnt,
+		wg:         new(sync.WaitGroup),
+		lock:       new(sync.Mutex),
+		shutDownWg: new(sync.WaitGroup),
+		autoTune:   0,
+	}
+	pool.isShutDown = 0
+	pool.maxWorkerCnt += int64(maxWorkerCnt)
+	if pool.cacheCnt <= 0 {
+		pool.taskQueue = make(chan Task)
+	} else {
+		pool.taskQueue = make(chan Task, pool.cacheCnt)
+	}
+	pool.workers = make(map[string]*worker, pool.maxWorkerCnt)
+	// dispatchQueue must be unbuffered, so the sender blocks until the dispatcher receives the task
+	pool.dispatchQueue = make(chan Task)
+	pool.dispatcher()
+
+	return pool
+}
+
+// EnableAutoTune enables auto-tuning of the worker count
+func (pool *RoutinePool) EnableAutoTune() {
+	atomic.StoreInt32(&pool.autoTune, 1)
+}
+
+func (pool *RoutinePool) checkStatus(t Task) error {
+	if t == nil {
+		return ErrTaskInvalid
+	}
+
+	if atomic.LoadInt32(&pool.isShutDown) == 
1 {
+		return ErrPoolShutDown
+	}
+	return nil
+}
+
+func (pool *RoutinePool) dispatcher() {
+	pool.shutDownWg.Add(1)
+	go func() {
+		for {
+			task, ok := <-pool.dispatchQueue
+			if !ok {
+				break
+			}
+
+			if task == closeQueue {
+				close(pool.taskQueue)
+				pool.shutDownWg.Done()
+				continue
+			}
+
+			if pool.GetWorkerCnt() < pool.GetMaxWorkerCnt() {
+				pool.addWorker()
+			}
+
+			pool.taskQueue <- task
+		}
+	}()
+}
+
+// AddMaxWorkerCnt adds the given value to maxWorkerCnt and returns the result; it is a no-op unless auto-tuning is enabled
+func (pool *RoutinePool) AddMaxWorkerCnt(value int64) int64 {
+	if atomic.LoadInt32(&pool.autoTune) == 1 {
+		return pool.basicPool.AddMaxWorkerCnt(value)
+	}
+	return pool.GetMaxWorkerCnt()
+}
+
+func (pool *RoutinePool) addWorker() {
+	if atomic.LoadInt32(&pool.autoTune) == 1 {
+		pool.lock.Lock()
+		defer pool.lock.Unlock()
+	}
+	w := &worker{}
+	w.name = fmt.Sprintf("worker-%d", len(pool.workers))
+	w.taskQueue = pool.taskQueue
+	w.wg = pool.wg
+	pool.AddWorkerCnt(1)
+	w.pool = pool
+	pool.workers[w.name] = w
+	pool.wg.Add(1)
+	w.start()
+}
+
+func (pool *RoutinePool) autoTuneWorker(w *worker) bool {
+	if atomic.LoadInt32(&pool.autoTune) == 0 {
+		return false
+	}
+
+	if w == nil {
+		return false
+	}
+
+	workerCnt := pool.GetWorkerCnt()
+	maxWorkerCnt := pool.GetMaxWorkerCnt()
+	if workerCnt > maxWorkerCnt && atomic.CompareAndSwapInt64(&pool.workerCnt, workerCnt, workerCnt-1) {
+		pool.lock.Lock()
+		defer pool.lock.Unlock()
+		delete(pool.workers, w.name)
+		w.wg.Done()
+		w.release()
+		return true
+	}
+
+	return false
+}
+
+// ExecuteFunc creates a funcWrapper instance with the specified function and calls the Execute function
+func (pool *RoutinePool) ExecuteFunc(f func() interface{}) {
+	fw := &funcWrapper{
+		f: f,
+	}
+	pool.Execute(fw)
+}
+
+// Execute pushes the specified task to the dispatchQueue
+func (pool *RoutinePool) Execute(t Task) {
+	if t != nil {
+		pool.dispatchQueue <- t
+	}
+}
+
+// SubmitFunc creates a funcWrapper instance with the specified function and calls the Submit function
+func (pool *RoutinePool) SubmitFunc(f func() interface{}) (Future, error) {
+	fw := &funcWrapper{
+		f: f,
+	}
+	return pool.Submit(fw)
+}
+
+// Submit pushes the specified task to the dispatchQueue, and returns the FutureResult and error info
+func (pool *RoutinePool) Submit(t Task) (Future, error) {
+	if err := pool.checkStatus(t); err != nil {
+		return nil, err
+	}
+	f := &FutureResult{}
+	f.resultChan = make(chan interface{}, 1)
+	tw := &taskWrapper{
+		t: t,
+		f: f,
+	}
+	pool.dispatchQueue <- tw
+	return f, nil
+}
+
+// SubmitWithTimeout pushes the specified task to the dispatchQueue, and returns the FutureResult and error info.
+// It also takes a timeout in milliseconds, and returns ErrSubmitTimeout if the task cannot be submitted within that time.
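+//
+// A short usage sketch (hedged: the pool sizes, the task value, and the 500 ms
+// budget are illustrative):
+//
+//	pool := NewRoutinePool(4, 16)
+//	future, err := pool.SubmitWithTimeout(task, 500)
+//	if err == ErrSubmitTimeout {
+//		// the task could not be queued within 500 milliseconds
+//	} else if err == nil {
+//		result := future.Get()
+//		_ = result
+//	}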
+func (pool *RoutinePool) SubmitWithTimeout(t Task, timeout int64) (Future, error) {
+	if timeout <= 0 {
+		return pool.Submit(t)
+	}
+	if err := pool.checkStatus(t); err != nil {
+		return nil, err
+	}
+	timeoutChan := make(chan bool, 1)
+	go func() {
+		time.Sleep(time.Millisecond * time.Duration(timeout))
+		timeoutChan <- true
+		close(timeoutChan)
+	}()
+
+	f := &FutureResult{}
+	f.resultChan = make(chan interface{}, 1)
+	tw := &taskWrapper{
+		t: t,
+		f: f,
+	}
+	select {
+	case pool.dispatchQueue <- tw:
+		return f, nil
+	case <-timeoutChan:
+		return nil, ErrSubmitTimeout
+	}
+}
+
+func (pool *RoutinePool) beforeCloseDispatchQueue() {
+	if !atomic.CompareAndSwapInt32(&pool.isShutDown, 0, 1) {
+		return
+	}
+	pool.dispatchQueue <- closeQueue
+	pool.wg.Wait()
+}
+
+func (pool *RoutinePool) doCloseDispatchQueue() {
+	close(pool.dispatchQueue)
+	pool.shutDownWg.Wait()
+}
+
+// ShutDown closes the RoutinePool instance
+func (pool *RoutinePool) ShutDown() {
+	pool.beforeCloseDispatchQueue()
+	pool.doCloseDispatchQueue()
+	for _, w := range pool.workers {
+		w.release()
+	}
+	pool.workers = nil
+	pool.taskQueue = nil
+	pool.dispatchQueue = nil
+}
+
+// NoChanPool defines a goroutine pool that limits concurrency with tokens instead of cached workers
+type NoChanPool struct {
+	basicPool
+	wg     *sync.WaitGroup
+	tokens chan interface{}
+}
+
+// NewNochanPool creates a new NoChanPool instance
+func NewNochanPool(maxWorkerCnt int) Pool {
+	if maxWorkerCnt <= 0 {
+		maxWorkerCnt = runtime.NumCPU()
+	}
+
+	pool := &NoChanPool{
+		wg:     new(sync.WaitGroup),
+		tokens: make(chan interface{}, maxWorkerCnt),
+	}
+	pool.isShutDown = 0
+	pool.AddMaxWorkerCnt(int64(maxWorkerCnt))
+
+	for i := 0; i < maxWorkerCnt; i++ {
+		pool.tokens <- struct{}{}
+	}
+
+	return pool
+}
+
+func (pool *NoChanPool) acquire() {
+	<-pool.tokens
+}
+
+func (pool *NoChanPool) release() {
+	pool.tokens <- struct{}{}
+}
+
+func (pool *NoChanPool) execute(t Task) {
+	pool.wg.Add(1)
+	go func() {
+		pool.acquire()
+		defer func() {
+			pool.release()
+			pool.wg.Done()
+		}()
+		runTask(t)
+	}()
+}
+
+// ShutDown closes the NoChanPool instance
+func (pool *NoChanPool) ShutDown() {
+	if !atomic.CompareAndSwapInt32(&pool.isShutDown, 0, 1) {
+		return
+	}
+	pool.wg.Wait()
+}
+
+// Execute executes the specified task
+func (pool *NoChanPool) Execute(t Task) {
+	if t != nil {
+		pool.execute(t)
+	}
+}
+
+// ExecuteFunc creates a funcWrapper instance with the specified function and calls the Execute function
+func (pool *NoChanPool) ExecuteFunc(f func() interface{}) {
+	fw := &funcWrapper{
+		f: f,
+	}
+	pool.Execute(fw)
+}
+
+// Submit executes the specified task, and returns the FutureResult and error info
+func (pool *NoChanPool) Submit(t Task) (Future, error) {
+	if t == nil {
+		return nil, ErrTaskInvalid
+	}
+
+	f := &FutureResult{}
+	f.resultChan = make(chan interface{}, 1)
+	tw := &taskWrapper{
+		t: t,
+		f: f,
+	}
+
+	pool.execute(tw)
+	return f, nil
+}
+
+// SubmitFunc creates a funcWrapper instance with the specified function and calls the Submit function
+func (pool *NoChanPool) SubmitFunc(f func() interface{}) (Future, error) {
+	fw := &funcWrapper{
+		f: f,
+	}
+	return pool.Submit(fw)
+}
diff --git a/modules/obs/temporary.go b/modules/obs/temporary.go
new file mode 100755
index 000000000..bfaeb8197
--- /dev/null
+++ b/modules/obs/temporary.go
@@ -0,0 +1,790 @@
+// Copyright 2019 Huawei Technologies Co.,Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. 
You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +//nolint:golint, unused +package obs + +import ( + "errors" + "fmt" + "io" + "net/http" + "os" + "strings" + "time" +) + +// CreateSignedUrl creates signed url with the specified CreateSignedUrlInput, and returns the CreateSignedUrlOutput and error +func (obsClient ObsClient) CreateSignedUrl(input *CreateSignedUrlInput) (output *CreateSignedUrlOutput, err error) { + if input == nil { + return nil, errors.New("CreateSignedUrlInput is nil") + } + + params := make(map[string]string, len(input.QueryParams)) + for key, value := range input.QueryParams { + params[key] = value + } + + if input.SubResource != "" { + params[string(input.SubResource)] = "" + } + + headers := make(map[string][]string, len(input.Headers)) + for key, value := range input.Headers { + headers[key] = []string{value} + } + + if input.Expires <= 0 { + input.Expires = 300 + } + + requestURL, err := obsClient.doAuthTemporary(string(input.Method), input.Bucket, input.Key, params, headers, int64(input.Expires)) + if err != nil { + return nil, err + } + + output = &CreateSignedUrlOutput{ + SignedUrl: requestURL, + ActualSignedRequestHeaders: headers, + } + return +} + +func (obsClient ObsClient) isSecurityToken(params map[string]string) { + if obsClient.conf.securityProvider.securityToken != "" { + if obsClient.conf.signature == SignatureObs { + params[HEADER_STS_TOKEN_OBS] = obsClient.conf.securityProvider.securityToken + } else { + params[HEADER_STS_TOKEN_AMZ] = obsClient.conf.securityProvider.securityToken + } + } +} + +// CreateBrowserBasedSignature gets the browser based signature with the specified CreateBrowserBasedSignatureInput, +// and returns the CreateBrowserBasedSignatureOutput and error +func (obsClient ObsClient) CreateBrowserBasedSignature(input *CreateBrowserBasedSignatureInput) (output *CreateBrowserBasedSignatureOutput, err error) { + if input == nil { + return nil, errors.New("CreateBrowserBasedSignatureInput is nil") + } + + params := make(map[string]string, len(input.FormParams)) + for key, value := range input.FormParams { + params[key] = value + } + + date := time.Now().UTC() + shortDate := date.Format(SHORT_DATE_FORMAT) + longDate := date.Format(LONG_DATE_FORMAT) + + credential, _ := getCredential(obsClient.conf.securityProvider.ak, obsClient.conf.region, shortDate) + + if input.Expires <= 0 { + input.Expires = 300 + } + + expiration := date.Add(time.Second * time.Duration(input.Expires)).Format(ISO8601_DATE_FORMAT) + if obsClient.conf.signature == SignatureV4 { + params[PARAM_ALGORITHM_AMZ_CAMEL] = V4_HASH_PREFIX + params[PARAM_CREDENTIAL_AMZ_CAMEL] = credential + params[PARAM_DATE_AMZ_CAMEL] = longDate + } + + obsClient.isSecurityToken(params) + + matchAnyBucket := true + matchAnyKey := true + count := 5 + if bucket := strings.TrimSpace(input.Bucket); bucket != "" { + params["bucket"] = bucket + matchAnyBucket = false + count-- + } + + if key := strings.TrimSpace(input.Key); key != "" { + params["key"] = key + matchAnyKey = false + count-- + } + + originPolicySlice := make([]string, 0, len(params)+count) + originPolicySlice = append(originPolicySlice, 
fmt.Sprintf("{\"expiration\":\"%s\",", expiration)) + originPolicySlice = append(originPolicySlice, "\"conditions\":[") + for key, value := range params { + if _key := strings.TrimSpace(strings.ToLower(key)); _key != "" { + originPolicySlice = append(originPolicySlice, fmt.Sprintf("{\"%s\":\"%s\"},", _key, value)) + } + } + + if matchAnyBucket { + originPolicySlice = append(originPolicySlice, "[\"starts-with\", \"$bucket\", \"\"],") + } + + if matchAnyKey { + originPolicySlice = append(originPolicySlice, "[\"starts-with\", \"$key\", \"\"],") + } + + originPolicySlice = append(originPolicySlice, "]}") + + originPolicy := strings.Join(originPolicySlice, "") + policy := Base64Encode([]byte(originPolicy)) + var signature string + if obsClient.conf.signature == SignatureV4 { + signature = getSignature(policy, obsClient.conf.securityProvider.sk, obsClient.conf.region, shortDate) + } else { + signature = Base64Encode(HmacSha1([]byte(obsClient.conf.securityProvider.sk), []byte(policy))) + } + + output = &CreateBrowserBasedSignatureOutput{ + OriginPolicy: originPolicy, + Policy: policy, + Algorithm: params[PARAM_ALGORITHM_AMZ_CAMEL], + Credential: params[PARAM_CREDENTIAL_AMZ_CAMEL], + Date: params[PARAM_DATE_AMZ_CAMEL], + Signature: signature, + } + return +} + +// ListBucketsWithSignedUrl lists buckets with the specified signed url and signed request headers +func (obsClient ObsClient) ListBucketsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListBucketsOutput, err error) { + output = &ListBucketsOutput{} + err = obsClient.doHTTPWithSignedURL("ListBuckets", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// CreateBucketWithSignedUrl creates bucket with the specified signed url and signed request headers and data +func (obsClient ObsClient) CreateBucketWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("CreateBucket", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// DeleteBucketWithSignedUrl deletes bucket with the specified signed url and signed request headers +func (obsClient ObsClient) DeleteBucketWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("DeleteBucket", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// SetBucketStoragePolicyWithSignedUrl sets bucket storage class with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketStoragePolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetBucketStoragePolicy", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketStoragePolicyWithSignedUrl gets bucket storage class with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketStoragePolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketStoragePolicyOutput, err error) { + output = &GetBucketStoragePolicyOutput{} + err = 
obsClient.doHTTPWithSignedURL("GetBucketStoragePolicy", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// ListObjectsWithSignedUrl lists objects in a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) ListObjectsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListObjectsOutput, err error) { + output = &ListObjectsOutput{} + err = obsClient.doHTTPWithSignedURL("ListObjects", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } else { + if location, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok { + output.Location = location[0] + } + } + return +} + +// ListVersionsWithSignedUrl lists versioning objects in a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) ListVersionsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListVersionsOutput, err error) { + output = &ListVersionsOutput{} + err = obsClient.doHTTPWithSignedURL("ListVersions", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } else { + if location, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok { + output.Location = location[0] + } + } + return +} + +// ListMultipartUploadsWithSignedUrl lists the multipart uploads that are initialized but not combined or aborted in a +// specified bucket with the specified signed url and signed request headers +func (obsClient ObsClient) ListMultipartUploadsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListMultipartUploadsOutput, err error) { + output = &ListMultipartUploadsOutput{} + err = obsClient.doHTTPWithSignedURL("ListMultipartUploads", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// SetBucketQuotaWithSignedUrl sets the bucket quota with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketQuotaWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetBucketQuota", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketQuotaWithSignedUrl gets the bucket quota with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketQuotaWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketQuotaOutput, err error) { + output = &GetBucketQuotaOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketQuota", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// HeadBucketWithSignedUrl checks whether a bucket exists with the specified signed url and signed request headers +func (obsClient ObsClient) HeadBucketWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("HeadBucket", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// HeadObjectWithSignedUrl checks whether an object exists with the specified signed url and signed request headers +func (obsClient ObsClient) HeadObjectWithSignedUrl(signedUrl 
string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("HeadObject", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketMetadataWithSignedUrl gets the metadata of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketMetadataWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketMetadataOutput, err error) { + output = &GetBucketMetadataOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketMetadata", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } else { + ParseGetBucketMetadataOutput(output) + } + return +} + +// GetBucketStorageInfoWithSignedUrl gets storage information about a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketStorageInfoWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketStorageInfoOutput, err error) { + output = &GetBucketStorageInfoOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketStorageInfo", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketLocationWithSignedUrl gets the location of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketLocationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketLocationOutput, err error) { + output = &GetBucketLocationOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketLocation", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// SetBucketAclWithSignedUrl sets the bucket ACL with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetBucketAcl", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketAclWithSignedUrl gets the bucket ACL with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketAclOutput, err error) { + output = &GetBucketAclOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketAcl", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// SetBucketPolicyWithSignedUrl sets the bucket policy with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketPolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetBucketPolicy", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketPolicyWithSignedUrl gets the bucket policy with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketPolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketPolicyOutput, err 
error) { + output = &GetBucketPolicyOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketPolicy", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, false) + if err != nil { + output = nil + } + return +} + +// DeleteBucketPolicyWithSignedUrl deletes the bucket policy with the specified signed url and signed request headers +func (obsClient ObsClient) DeleteBucketPolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("DeleteBucketPolicy", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// SetBucketCorsWithSignedUrl sets CORS rules for a bucket with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketCorsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetBucketCors", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketCorsWithSignedUrl gets CORS rules of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketCorsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketCorsOutput, err error) { + output = &GetBucketCorsOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketCors", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// DeleteBucketCorsWithSignedUrl deletes CORS rules of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) DeleteBucketCorsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("DeleteBucketCors", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// SetBucketVersioningWithSignedUrl sets the versioning status for a bucket with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketVersioningWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetBucketVersioning", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketVersioningWithSignedUrl gets the versioning status of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketVersioningWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketVersioningOutput, err error) { + output = &GetBucketVersioningOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketVersioning", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// SetBucketWebsiteConfigurationWithSignedUrl sets website hosting for a bucket with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketWebsiteConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = 
obsClient.doHTTPWithSignedURL("SetBucketWebsiteConfiguration", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketWebsiteConfigurationWithSignedUrl gets the website hosting settings of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketWebsiteConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketWebsiteConfigurationOutput, err error) { + output = &GetBucketWebsiteConfigurationOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketWebsiteConfiguration", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// DeleteBucketWebsiteConfigurationWithSignedUrl deletes the website hosting settings of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) DeleteBucketWebsiteConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("DeleteBucketWebsiteConfiguration", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// SetBucketLoggingConfigurationWithSignedUrl sets the bucket logging with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketLoggingConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetBucketLoggingConfiguration", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketLoggingConfigurationWithSignedUrl gets the logging settings of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketLoggingConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketLoggingConfigurationOutput, err error) { + output = &GetBucketLoggingConfigurationOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketLoggingConfiguration", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// SetBucketLifecycleConfigurationWithSignedUrl sets lifecycle rules for a bucket with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketLifecycleConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetBucketLifecycleConfiguration", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketLifecycleConfigurationWithSignedUrl gets lifecycle rules of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketLifecycleConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketLifecycleConfigurationOutput, err error) { + output = &GetBucketLifecycleConfigurationOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketLifecycleConfiguration", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// 
DeleteBucketLifecycleConfigurationWithSignedUrl deletes lifecycle rules of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) DeleteBucketLifecycleConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("DeleteBucketLifecycleConfiguration", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// SetBucketTaggingWithSignedUrl sets bucket tags with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketTaggingWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetBucketTagging", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketTaggingWithSignedUrl gets bucket tags with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketTaggingWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketTaggingOutput, err error) { + output = &GetBucketTaggingOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketTagging", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// DeleteBucketTaggingWithSignedUrl deletes bucket tags with the specified signed url and signed request headers +func (obsClient ObsClient) DeleteBucketTaggingWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("DeleteBucketTagging", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// SetBucketNotificationWithSignedUrl sets event notification for a bucket with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketNotificationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetBucketNotification", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketNotificationWithSignedUrl gets event notification settings of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketNotificationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketNotificationOutput, err error) { + output = &GetBucketNotificationOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketNotification", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// DeleteObjectWithSignedUrl deletes an object with the specified signed url and signed request headers +func (obsClient ObsClient) DeleteObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *DeleteObjectOutput, err error) { + output = &DeleteObjectOutput{} + err = obsClient.doHTTPWithSignedURL("DeleteObject", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } else { + ParseDeleteObjectOutput(output) + } + return +} + +// 
DeleteObjectsWithSignedUrl deletes objects in a batch with the specified signed url and signed request headers and data +func (obsClient ObsClient) DeleteObjectsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *DeleteObjectsOutput, err error) { + output = &DeleteObjectsOutput{} + err = obsClient.doHTTPWithSignedURL("DeleteObjects", HTTP_POST, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// SetObjectAclWithSignedUrl sets the ACL for an object with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetObjectAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetObjectAcl", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetObjectAclWithSignedUrl gets the ACL of an object with the specified signed url and signed request headers +func (obsClient ObsClient) GetObjectAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetObjectAclOutput, err error) { + output = &GetObjectAclOutput{} + err = obsClient.doHTTPWithSignedURL("GetObjectAcl", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } else { + if versionID, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok { + output.VersionId = versionID[0] + } + } + return +} + +// RestoreObjectWithSignedUrl restores an object with the specified signed url and signed request headers and data +func (obsClient ObsClient) RestoreObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("RestoreObject", HTTP_POST, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetObjectMetadataWithSignedUrl gets object metadata with the specified signed url and signed request headers +func (obsClient ObsClient) GetObjectMetadataWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetObjectMetadataOutput, err error) { + output = &GetObjectMetadataOutput{} + err = obsClient.doHTTPWithSignedURL("GetObjectMetadata", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } else { + ParseGetObjectMetadataOutput(output) + } + return +} + +// GetObjectWithSignedUrl downloads an object with the specified signed url and signed request headers +func (obsClient ObsClient) GetObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetObjectOutput, err error) { + output = &GetObjectOutput{} + err = obsClient.doHTTPWithSignedURL("GetObject", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } else { + ParseGetObjectOutput(output) + } + return +} + +// PutObjectWithSignedUrl uploads an object to the specified bucket with the specified signed url and signed request headers and data +func (obsClient ObsClient) PutObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *PutObjectOutput, err error) { + output = &PutObjectOutput{} + err = obsClient.doHTTPWithSignedURL("PutObject", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { +
output = nil + } else { + ParsePutObjectOutput(output) + } + return +} + +// PutFileWithSignedUrl uploads a file to the specified bucket with the specified signed url and signed request headers and sourceFile path +func (obsClient ObsClient) PutFileWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, sourceFile string) (output *PutObjectOutput, err error) { + var data io.Reader + sourceFile = strings.TrimSpace(sourceFile) + if sourceFile != "" { + fd, _err := os.Open(sourceFile) + if _err != nil { + err = _err + return nil, err + } + defer func() { + errMsg := fd.Close() + if errMsg != nil { + doLog(LEVEL_WARN, "Failed to close file with reason: %v", errMsg) + } + }() + + stat, _err := fd.Stat() + if _err != nil { + err = _err + return nil, err + } + fileReaderWrapper := &fileReaderWrapper{filePath: sourceFile} + fileReaderWrapper.reader = fd + + var contentLength int64 + if value, ok := actualSignedRequestHeaders[HEADER_CONTENT_LENGTH_CAMEL]; ok { + contentLength = StringToInt64(value[0], -1) + } else if value, ok := actualSignedRequestHeaders[HEADER_CONTENT_LENGTH]; ok { + contentLength = StringToInt64(value[0], -1) + } else { + contentLength = stat.Size() + } + if contentLength > stat.Size() { + return nil, errors.New("ContentLength is larger than fileSize") + } + fileReaderWrapper.totalCount = contentLength + data = fileReaderWrapper + } + + output = &PutObjectOutput{} + err = obsClient.doHTTPWithSignedURL("PutObject", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } else { + ParsePutObjectOutput(output) + } + return +} + +// CopyObjectWithSignedUrl creates a copy for an existing object with the specified signed url and signed request headers +func (obsClient ObsClient) CopyObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *CopyObjectOutput, err error) { + output = &CopyObjectOutput{} + err = obsClient.doHTTPWithSignedURL("CopyObject", HTTP_PUT, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } else { + ParseCopyObjectOutput(output) + } + return +} + +// AbortMultipartUploadWithSignedUrl aborts a multipart upload in a specified bucket by using the multipart upload ID with the specified signed url and signed request headers +func (obsClient ObsClient) AbortMultipartUploadWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("AbortMultipartUpload", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// InitiateMultipartUploadWithSignedUrl initializes a multipart upload with the specified signed url and signed request headers +func (obsClient ObsClient) InitiateMultipartUploadWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *InitiateMultipartUploadOutput, err error) { + output = &InitiateMultipartUploadOutput{} + err = obsClient.doHTTPWithSignedURL("InitiateMultipartUpload", HTTP_POST, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } else { + ParseInitiateMultipartUploadOutput(output) + } + return +} + +// UploadPartWithSignedUrl uploads a part to a specified bucket by using a specified multipart upload ID +// with the specified signed url and signed request headers and data +func (obsClient ObsClient) UploadPartWithSignedUrl(signedUrl string, actualSignedRequestHeaders 
http.Header, data io.Reader) (output *UploadPartOutput, err error) { + output = &UploadPartOutput{} + err = obsClient.doHTTPWithSignedURL("UploadPart", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } else { + ParseUploadPartOutput(output) + } + return +} + +// CompleteMultipartUploadWithSignedUrl combines the uploaded parts in a specified bucket by using the multipart upload ID +// with the specified signed url and signed request headers and data +func (obsClient ObsClient) CompleteMultipartUploadWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *CompleteMultipartUploadOutput, err error) { + output = &CompleteMultipartUploadOutput{} + err = obsClient.doHTTPWithSignedURL("CompleteMultipartUpload", HTTP_POST, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } else { + ParseCompleteMultipartUploadOutput(output) + } + return +} + +// ListPartsWithSignedUrl lists the uploaded parts in a bucket by using the multipart upload ID with the specified signed url and signed request headers +func (obsClient ObsClient) ListPartsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListPartsOutput, err error) { + output = &ListPartsOutput{} + err = obsClient.doHTTPWithSignedURL("ListParts", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} + +// CopyPartWithSignedUrl copies a part to a specified bucket by using a specified multipart upload ID with the specified signed url and signed request headers +func (obsClient ObsClient) CopyPartWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *CopyPartOutput, err error) { + output = &CopyPartOutput{} + err = obsClient.doHTTPWithSignedURL("CopyPart", HTTP_PUT, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } else { + ParseCopyPartOutput(output) + } + return +} + +// SetBucketRequestPaymentWithSignedUrl sets the requester-pays setting for a bucket with the specified signed url and signed request headers and data +func (obsClient ObsClient) SetBucketRequestPaymentWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) { + output = &BaseModel{} + err = obsClient.doHTTPWithSignedURL("SetBucketRequestPayment", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true) + if err != nil { + output = nil + } + return +} + +// GetBucketRequestPaymentWithSignedUrl gets the requester-pays setting of a bucket with the specified signed url and signed request headers +func (obsClient ObsClient) GetBucketRequestPaymentWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketRequestPaymentOutput, err error) { + output = &GetBucketRequestPaymentOutput{} + err = obsClient.doHTTPWithSignedURL("GetBucketRequestPayment", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true) + if err != nil { + output = nil + } + return +} diff --git a/modules/obs/trait.go b/modules/obs/trait.go new file mode 100755 index 000000000..9a59d6a71 --- /dev/null +++ b/modules/obs/trait.go @@ -0,0 +1,909 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License.
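The signed-URL wrappers that close out temporary.go above all follow the same round trip: generate a pre-signed URL, then replay the signed headers against the matching *WithSignedUrl method. A minimal sketch of that flow, assuming the CreateSignedUrl helper and CreateSignedUrlInput type defined earlier in temporary.go (not shown in this hunk) and gitea's code.gitea.io/gitea module root; credentials, endpoint, bucket, and key are placeholders:

package main

import (
	"fmt"
	"io/ioutil"

	"code.gitea.io/gitea/modules/obs"
)

func main() {
	// New constructs an ObsClient; AK/SK/endpoint here are placeholders.
	client, err := obs.New("your-ak", "your-sk", "https://obs.example.com")
	if err != nil {
		panic(err)
	}
	// Pre-sign a GET that stays valid for 300 seconds (names per temporary.go in this patch).
	signed, err := client.CreateSignedUrl(&obs.CreateSignedUrlInput{
		Method:  obs.HttpMethodGet,
		Bucket:  "my-bucket",
		Key:     "my-object",
		Expires: 300,
	})
	if err != nil {
		panic(err)
	}
	// The exact header set that was signed must be sent along with the request.
	output, err := client.GetObjectWithSignedUrl(signed.SignedUrl, signed.ActualSignedRequestHeaders)
	if err != nil {
		panic(err)
	}
	defer output.Body.Close()
	body, _ := ioutil.ReadAll(output.Body)
	fmt.Printf("read %d bytes\n", len(body))
}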
You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +//nolint:structcheck, unused +//nolint:golint, unused +package obs + +import ( + "bytes" + "fmt" + "io" + "os" + "strings" +) + +// IReadCloser defines interface with function: setReadCloser +type IReadCloser interface { + setReadCloser(body io.ReadCloser) +} + +func (output *GetObjectOutput) setReadCloser(body io.ReadCloser) { + output.Body = body +} + +func setHeaders(headers map[string][]string, header string, headerValue []string, isObs bool) { + if isObs { + header = HEADER_PREFIX_OBS + header + headers[header] = headerValue + } else { + header = HEADER_PREFIX + header + headers[header] = headerValue + } +} + +func setHeadersNext(headers map[string][]string, header string, headerNext string, headerValue []string, isObs bool) { + if isObs { + headers[header] = headerValue + } else { + headers[headerNext] = headerValue + } +} + +// IBaseModel defines interface for base response model +type IBaseModel interface { + setStatusCode(statusCode int) + + setRequestID(requestID string) + + setResponseHeaders(responseHeaders map[string][]string) +} + +// ISerializable defines interface with function: trans +type ISerializable interface { + trans(isObs bool) (map[string]string, map[string][]string, interface{}, error) +} + +// DefaultSerializable defines default serializable struct +type DefaultSerializable struct { + params map[string]string + headers map[string][]string + data interface{} +} + +func (input DefaultSerializable) trans(isObs bool) (map[string]string, map[string][]string, interface{}, error) { + return input.params, input.headers, input.data, nil +} + +var defaultSerializable = &DefaultSerializable{} + +func newSubResourceSerial(subResource SubResourceType) *DefaultSerializable { + return &DefaultSerializable{map[string]string{string(subResource): ""}, nil, nil} +} + +func trans(subResource SubResourceType, input interface{}) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(subResource): ""} + data, err = ConvertRequestToIoReader(input) + return +} + +func (baseModel *BaseModel) setStatusCode(statusCode int) { + baseModel.StatusCode = statusCode +} + +func (baseModel *BaseModel) setRequestID(requestID string) { + baseModel.RequestId = requestID +} + +func (baseModel *BaseModel) setResponseHeaders(responseHeaders map[string][]string) { + baseModel.ResponseHeaders = responseHeaders +} + +func (input ListBucketsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + headers = make(map[string][]string) + if input.QueryLocation && !isObs { + setHeaders(headers, HEADER_LOCATION_AMZ, []string{"true"}, isObs) + } + return +} + +func (input CreateBucketInput) prepareGrantHeaders(headers map[string][]string, isObs bool) { + if grantReadID := input.GrantReadId; grantReadID != "" { + setHeaders(headers, HEADER_GRANT_READ_OBS, []string{grantReadID}, isObs) + } + if grantWriteID := input.GrantWriteId; grantWriteID != "" { + setHeaders(headers, HEADER_GRANT_WRITE_OBS, []string{grantWriteID}, isObs) + } + if grantReadAcpID := input.GrantReadAcpId; 
grantReadAcpID != "" { + setHeaders(headers, HEADER_GRANT_READ_ACP_OBS, []string{grantReadAcpID}, isObs) + } + if grantWriteAcpID := input.GrantWriteAcpId; grantWriteAcpID != "" { + setHeaders(headers, HEADER_GRANT_WRITE_ACP_OBS, []string{grantWriteAcpID}, isObs) + } + if grantFullControlID := input.GrantFullControlId; grantFullControlID != "" { + setHeaders(headers, HEADER_GRANT_FULL_CONTROL_OBS, []string{grantFullControlID}, isObs) + } + if grantReadDeliveredID := input.GrantReadDeliveredId; grantReadDeliveredID != "" { + setHeaders(headers, HEADER_GRANT_READ_DELIVERED_OBS, []string{grantReadDeliveredID}, true) + } + if grantFullControlDeliveredID := input.GrantFullControlDeliveredId; grantFullControlDeliveredID != "" { + setHeaders(headers, HEADER_GRANT_FULL_CONTROL_DELIVERED_OBS, []string{grantFullControlDeliveredID}, true) + } +} + +func (input CreateBucketInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + headers = make(map[string][]string) + if acl := string(input.ACL); acl != "" { + setHeaders(headers, HEADER_ACL, []string{acl}, isObs) + } + if storageClass := string(input.StorageClass); storageClass != "" { + if !isObs { + if storageClass == string(StorageClassWarm) { + storageClass = string(storageClassStandardIA) + } else if storageClass == string(StorageClassCold) { + storageClass = string(storageClassGlacier) + } + } + setHeadersNext(headers, HEADER_STORAGE_CLASS_OBS, HEADER_STORAGE_CLASS, []string{storageClass}, isObs) + } + if epid := input.Epid; epid != "" { + setHeaders(headers, HEADER_EPID_HEADERS, []string{epid}, isObs) + } + if availableZone := input.AvailableZone; availableZone != "" { + setHeaders(headers, HEADER_AZ_REDUNDANCY, []string{availableZone}, isObs) + } + + input.prepareGrantHeaders(headers, isObs) + if location := strings.TrimSpace(input.Location); location != "" { + input.Location = location + + xml := make([]string, 0, 3) + xml = append(xml, "") + if isObs { + xml = append(xml, fmt.Sprintf("%s", input.Location)) + } else { + xml = append(xml, fmt.Sprintf("%s", input.Location)) + } + xml = append(xml, "") + + data = strings.Join(xml, "") + } + return +} + +func (input SetBucketStoragePolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + xml := make([]string, 0, 1) + if !isObs { + storageClass := "STANDARD" + if input.StorageClass == StorageClassWarm { + storageClass = string(storageClassStandardIA) + } else if input.StorageClass == StorageClassCold { + storageClass = string(storageClassGlacier) + } + params = map[string]string{string(SubResourceStoragePolicy): ""} + xml = append(xml, fmt.Sprintf("%s", storageClass)) + } else { + if input.StorageClass != StorageClassWarm && input.StorageClass != StorageClassCold { + input.StorageClass = StorageClassStandard + } + params = map[string]string{string(SubResourceStorageClass): ""} + xml = append(xml, fmt.Sprintf("%s", input.StorageClass)) + } + data = strings.Join(xml, "") + return +} + +func (input ListObjsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = make(map[string]string) + if input.Prefix != "" { + params["prefix"] = input.Prefix + } + if input.Delimiter != "" { + params["delimiter"] = input.Delimiter + } + if input.MaxKeys > 0 { + params["max-keys"] = IntToString(input.MaxKeys) + } + headers = make(map[string][]string) + if origin := strings.TrimSpace(input.Origin); origin != "" { + 
headers[HEADER_ORIGIN_CAMEL] = []string{origin} + } + if requestHeader := strings.TrimSpace(input.RequestHeader); requestHeader != "" { + headers[HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL] = []string{requestHeader} + } + return +} + +func (input ListObjectsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params, headers, data, err = input.ListObjsInput.trans(isObs) + if err != nil { + return + } + if input.Marker != "" { + params["marker"] = input.Marker + } + return +} + +func (input ListVersionsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params, headers, data, err = input.ListObjsInput.trans(isObs) + if err != nil { + return + } + params[string(SubResourceVersions)] = "" + if input.KeyMarker != "" { + params["key-marker"] = input.KeyMarker + } + if input.VersionIdMarker != "" { + params["version-id-marker"] = input.VersionIdMarker + } + return +} + +func (input ListMultipartUploadsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceUploads): ""} + if input.Prefix != "" { + params["prefix"] = input.Prefix + } + if input.Delimiter != "" { + params["delimiter"] = input.Delimiter + } + if input.MaxUploads > 0 { + params["max-uploads"] = IntToString(input.MaxUploads) + } + if input.KeyMarker != "" { + params["key-marker"] = input.KeyMarker + } + if input.UploadIdMarker != "" { + params["upload-id-marker"] = input.UploadIdMarker + } + return +} + +func (input SetBucketQuotaInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + return trans(SubResourceQuota, input) +} + +func (input SetBucketAclInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceAcl): ""} + headers = make(map[string][]string) + + if acl := string(input.ACL); acl != "" { + setHeaders(headers, HEADER_ACL, []string{acl}, isObs) + } else { + data, _ = convertBucketACLToXML(input.AccessControlPolicy, false, isObs) + } + return +} + +func (input SetBucketPolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourcePolicy): ""} + data = strings.NewReader(input.Policy) + return +} + +func (input SetBucketCorsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceCors): ""} + data, md5, err := ConvertRequestToIoReaderV2(input) + if err != nil { + return + } + headers = map[string][]string{HEADER_MD5_CAMEL: {md5}} + return +} + +func (input SetBucketVersioningInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + return trans(SubResourceVersioning, input) +} + +func (input SetBucketWebsiteConfigurationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceWebsite): ""} + data, _ = ConvertWebsiteConfigurationToXml(input.BucketWebsiteConfiguration, false) + return +} + +func (input GetBucketMetadataInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + headers = make(map[string][]string) + if origin := 
strings.TrimSpace(input.Origin); origin != "" { + headers[HEADER_ORIGIN_CAMEL] = []string{origin} + } + if requestHeader := strings.TrimSpace(input.RequestHeader); requestHeader != "" { + headers[HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL] = []string{requestHeader} + } + return +} + +func (input SetBucketLoggingConfigurationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceLogging): ""} + data, _ = ConvertLoggingStatusToXml(input.BucketLoggingStatus, false, isObs) + return +} + +func (input SetBucketLifecycleConfigurationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceLifecycle): ""} + data, md5 := ConvertLifecyleConfigurationToXml(input.BucketLifecyleConfiguration, true, isObs) + headers = map[string][]string{HEADER_MD5_CAMEL: {md5}} + return +} + +func (input SetBucketTaggingInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceTagging): ""} + data, md5, err := ConvertRequestToIoReaderV2(input) + if err != nil { + return + } + headers = map[string][]string{HEADER_MD5_CAMEL: {md5}} + return +} + +func (input SetBucketNotificationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceNotification): ""} + data, _ = ConvertNotificationToXml(input.BucketNotification, false, isObs) + return +} + +func (input DeleteObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = make(map[string]string) + if input.VersionId != "" { + params[PARAM_VERSION_ID] = input.VersionId + } + return +} + +func (input DeleteObjectsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceDelete): ""} + data, md5, err := ConvertRequestToIoReaderV2(input) + if err != nil { + return + } + headers = map[string][]string{HEADER_MD5_CAMEL: {md5}} + return +} + +func (input SetObjectAclInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceAcl): ""} + if input.VersionId != "" { + params[PARAM_VERSION_ID] = input.VersionId + } + headers = make(map[string][]string) + if acl := string(input.ACL); acl != "" { + setHeaders(headers, HEADER_ACL, []string{acl}, isObs) + } else { + data, _ = ConvertAclToXml(input.AccessControlPolicy, false, isObs) + } + return +} + +func (input GetObjectAclInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceAcl): ""} + if input.VersionId != "" { + params[PARAM_VERSION_ID] = input.VersionId + } + return +} + +func (input RestoreObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{string(SubResourceRestore): ""} + if input.VersionId != "" { + params[PARAM_VERSION_ID] = input.VersionId + } + if !isObs { + data, err = ConvertRequestToIoReader(input) + } else { + data = ConverntObsRestoreToXml(input) + } + return +} + +// GetEncryption gets the Encryption field value from SseKmsHeader +func (header SseKmsHeader) 
GetEncryption() string { + if header.Encryption != "" { + return header.Encryption + } + if !header.isObs { + return DEFAULT_SSE_KMS_ENCRYPTION + } + return DEFAULT_SSE_KMS_ENCRYPTION_OBS +} + +// GetKey gets the Key field value from SseKmsHeader +func (header SseKmsHeader) GetKey() string { + return header.Key +} + +// GetEncryption gets the Encryption field value from SseCHeader +func (header SseCHeader) GetEncryption() string { + if header.Encryption != "" { + return header.Encryption + } + return DEFAULT_SSE_C_ENCRYPTION +} + +// GetKey gets the Key field value from SseCHeader +func (header SseCHeader) GetKey() string { + return header.Key +} + +// GetKeyMD5 gets the KeyMD5 field value from SseCHeader +func (header SseCHeader) GetKeyMD5() string { + if header.KeyMD5 != "" { + return header.KeyMD5 + } + + if ret, err := Base64Decode(header.GetKey()); err == nil { + return Base64Md5(ret) + } + return "" +} + +func setSseHeader(headers map[string][]string, sseHeader ISseHeader, sseCOnly bool, isObs bool) { + if sseHeader != nil { + if sseCHeader, ok := sseHeader.(SseCHeader); ok { + setHeaders(headers, HEADER_SSEC_ENCRYPTION, []string{sseCHeader.GetEncryption()}, isObs) + setHeaders(headers, HEADER_SSEC_KEY, []string{sseCHeader.GetKey()}, isObs) + setHeaders(headers, HEADER_SSEC_KEY_MD5, []string{sseCHeader.GetKeyMD5()}, isObs) + } else if sseKmsHeader, ok := sseHeader.(SseKmsHeader); !sseCOnly && ok { + sseKmsHeader.isObs = isObs + setHeaders(headers, HEADER_SSEKMS_ENCRYPTION, []string{sseKmsHeader.GetEncryption()}, isObs) + if sseKmsHeader.GetKey() != "" { + setHeadersNext(headers, HEADER_SSEKMS_KEY_OBS, HEADER_SSEKMS_KEY_AMZ, []string{sseKmsHeader.GetKey()}, isObs) + } + } + } +} + +func (input GetObjectMetadataInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = make(map[string]string) + if input.VersionId != "" { + params[PARAM_VERSION_ID] = input.VersionId + } + headers = make(map[string][]string) + + if input.Origin != "" { + headers[HEADER_ORIGIN_CAMEL] = []string{input.Origin} + } + + if input.RequestHeader != "" { + headers[HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL] = []string{input.RequestHeader} + } + setSseHeader(headers, input.SseHeader, true, isObs) + return +} + +func (input SetObjectMetadataInput) prepareContentHeaders(headers map[string][]string) { + if input.ContentDisposition != "" { + headers[HEADER_CONTENT_DISPOSITION_CAMEL] = []string{input.ContentDisposition} + } + if input.ContentEncoding != "" { + headers[HEADER_CONTENT_ENCODING_CAMEL] = []string{input.ContentEncoding} + } + if input.ContentLanguage != "" { + headers[HEADER_CONTENT_LANGUAGE_CAMEL] = []string{input.ContentLanguage} + } + + if input.ContentType != "" { + headers[HEADER_CONTENT_TYPE_CAML] = []string{input.ContentType} + } +} + +func (input SetObjectMetadataInput) prepareStorageClass(headers map[string][]string, isObs bool) { + if storageClass := string(input.StorageClass); storageClass != "" { + if !isObs { + if storageClass == string(StorageClassWarm) { + storageClass = string(storageClassStandardIA) + } else if storageClass == string(StorageClassCold) { + storageClass = string(storageClassGlacier) + } + } + setHeaders(headers, HEADER_STORAGE_CLASS2, []string{storageClass}, isObs) + } +} + +func (input SetObjectMetadataInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = make(map[string]string) + params = map[string]string{string(SubResourceMetadata): ""} + if 
input.VersionId != "" { + params[PARAM_VERSION_ID] = input.VersionId + } + headers = make(map[string][]string) + + if directive := string(input.MetadataDirective); directive != "" { + setHeaders(headers, HEADER_METADATA_DIRECTIVE, []string{string(input.MetadataDirective)}, isObs) + } else { + setHeaders(headers, HEADER_METADATA_DIRECTIVE, []string{string(ReplaceNew)}, isObs) + } + if input.CacheControl != "" { + headers[HEADER_CACHE_CONTROL_CAMEL] = []string{input.CacheControl} + } + input.prepareContentHeaders(headers) + if input.Expires != "" { + headers[HEADER_EXPIRES_CAMEL] = []string{input.Expires} + } + if input.WebsiteRedirectLocation != "" { + setHeaders(headers, HEADER_WEBSITE_REDIRECT_LOCATION, []string{input.WebsiteRedirectLocation}, isObs) + } + input.prepareStorageClass(headers, isObs) + if input.Metadata != nil { + for key, value := range input.Metadata { + key = strings.TrimSpace(key) + setHeadersNext(headers, HEADER_PREFIX_META_OBS+key, HEADER_PREFIX_META+key, []string{value}, isObs) + } + } + return +} + +func (input GetObjectInput) prepareResponseParams(params map[string]string) { + if input.ResponseCacheControl != "" { + params[PARAM_RESPONSE_CACHE_CONTROL] = input.ResponseCacheControl + } + if input.ResponseContentDisposition != "" { + params[PARAM_RESPONSE_CONTENT_DISPOSITION] = input.ResponseContentDisposition + } + if input.ResponseContentEncoding != "" { + params[PARAM_RESPONSE_CONTENT_ENCODING] = input.ResponseContentEncoding + } + if input.ResponseContentLanguage != "" { + params[PARAM_RESPONSE_CONTENT_LANGUAGE] = input.ResponseContentLanguage + } + if input.ResponseContentType != "" { + params[PARAM_RESPONSE_CONTENT_TYPE] = input.ResponseContentType + } + if input.ResponseExpires != "" { + params[PARAM_RESPONSE_EXPIRES] = input.ResponseExpires + } +} + +func (input GetObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params, headers, data, err = input.GetObjectMetadataInput.trans(isObs) + if err != nil { + return + } + input.prepareResponseParams(params) + if input.ImageProcess != "" { + params[PARAM_IMAGE_PROCESS] = input.ImageProcess + } + if input.RangeStart >= 0 && input.RangeEnd > input.RangeStart { + headers[HEADER_RANGE] = []string{fmt.Sprintf("bytes=%d-%d", input.RangeStart, input.RangeEnd)} + } + + if input.IfMatch != "" { + headers[HEADER_IF_MATCH] = []string{input.IfMatch} + } + if input.IfNoneMatch != "" { + headers[HEADER_IF_NONE_MATCH] = []string{input.IfNoneMatch} + } + if !input.IfModifiedSince.IsZero() { + headers[HEADER_IF_MODIFIED_SINCE] = []string{FormatUtcToRfc1123(input.IfModifiedSince)} + } + if !input.IfUnmodifiedSince.IsZero() { + headers[HEADER_IF_UNMODIFIED_SINCE] = []string{FormatUtcToRfc1123(input.IfUnmodifiedSince)} + } + return +} + +func (input ObjectOperationInput) prepareGrantHeaders(headers map[string][]string) { + if GrantReadID := input.GrantReadId; GrantReadID != "" { + setHeaders(headers, HEADER_GRANT_READ_OBS, []string{GrantReadID}, true) + } + if GrantReadAcpID := input.GrantReadAcpId; GrantReadAcpID != "" { + setHeaders(headers, HEADER_GRANT_READ_ACP_OBS, []string{GrantReadAcpID}, true) + } + if GrantWriteAcpID := input.GrantWriteAcpId; GrantWriteAcpID != "" { + setHeaders(headers, HEADER_GRANT_WRITE_ACP_OBS, []string{GrantWriteAcpID}, true) + } + if GrantFullControlID := input.GrantFullControlId; GrantFullControlID != "" { + setHeaders(headers, HEADER_GRANT_FULL_CONTROL_OBS, []string{GrantFullControlID}, true) + } +} + +func (input 
ObjectOperationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + headers = make(map[string][]string) + params = make(map[string]string) + if acl := string(input.ACL); acl != "" { + setHeaders(headers, HEADER_ACL, []string{acl}, isObs) + } + input.prepareGrantHeaders(headers) + if storageClass := string(input.StorageClass); storageClass != "" { + if !isObs { + if storageClass == string(StorageClassWarm) { + storageClass = string(storageClassStandardIA) + } else if storageClass == string(StorageClassCold) { + storageClass = string(storageClassGlacier) + } + } + setHeaders(headers, HEADER_STORAGE_CLASS2, []string{storageClass}, isObs) + } + if input.WebsiteRedirectLocation != "" { + setHeaders(headers, HEADER_WEBSITE_REDIRECT_LOCATION, []string{input.WebsiteRedirectLocation}, isObs) + + } + setSseHeader(headers, input.SseHeader, false, isObs) + if input.Expires != 0 { + setHeaders(headers, HEADER_EXPIRES, []string{Int64ToString(input.Expires)}, true) + } + if input.Metadata != nil { + for key, value := range input.Metadata { + key = strings.TrimSpace(key) + setHeadersNext(headers, HEADER_PREFIX_META_OBS+key, HEADER_PREFIX_META+key, []string{value}, isObs) + } + } + return +} + +func (input PutObjectBasicInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params, headers, data, err = input.ObjectOperationInput.trans(isObs) + if err != nil { + return + } + + if input.ContentMD5 != "" { + headers[HEADER_MD5_CAMEL] = []string{input.ContentMD5} + } + + if input.ContentLength > 0 { + headers[HEADER_CONTENT_LENGTH_CAMEL] = []string{Int64ToString(input.ContentLength)} + } + if input.ContentType != "" { + headers[HEADER_CONTENT_TYPE_CAML] = []string{input.ContentType} + } + + return +} + +func (input PutObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params, headers, data, err = input.PutObjectBasicInput.trans(isObs) + if err != nil { + return + } + if input.Body != nil { + data = input.Body + } + return +} + +func (input CopyObjectInput) prepareReplaceHeaders(headers map[string][]string) { + if input.CacheControl != "" { + headers[HEADER_CACHE_CONTROL] = []string{input.CacheControl} + } + if input.ContentDisposition != "" { + headers[HEADER_CONTENT_DISPOSITION] = []string{input.ContentDisposition} + } + if input.ContentEncoding != "" { + headers[HEADER_CONTENT_ENCODING] = []string{input.ContentEncoding} + } + if input.ContentLanguage != "" { + headers[HEADER_CONTENT_LANGUAGE] = []string{input.ContentLanguage} + } + if input.ContentType != "" { + headers[HEADER_CONTENT_TYPE] = []string{input.ContentType} + } + if input.Expires != "" { + headers[HEADER_EXPIRES] = []string{input.Expires} + } +} + +func (input CopyObjectInput) prepareCopySourceHeaders(headers map[string][]string, isObs bool) { + if input.CopySourceIfMatch != "" { + setHeaders(headers, HEADER_COPY_SOURCE_IF_MATCH, []string{input.CopySourceIfMatch}, isObs) + } + if input.CopySourceIfNoneMatch != "" { + setHeaders(headers, HEADER_COPY_SOURCE_IF_NONE_MATCH, []string{input.CopySourceIfNoneMatch}, isObs) + } + if !input.CopySourceIfModifiedSince.IsZero() { + setHeaders(headers, HEADER_COPY_SOURCE_IF_MODIFIED_SINCE, []string{FormatUtcToRfc1123(input.CopySourceIfModifiedSince)}, isObs) + } + if !input.CopySourceIfUnmodifiedSince.IsZero() { + setHeaders(headers, HEADER_COPY_SOURCE_IF_UNMODIFIED_SINCE, 
[]string{FormatUtcToRfc1123(input.CopySourceIfUnmodifiedSince)}, isObs) + } +} + +func (input CopyObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params, headers, data, err = input.ObjectOperationInput.trans(isObs) + if err != nil { + return + } + + var copySource string + if input.CopySourceVersionId != "" { + copySource = fmt.Sprintf("%s/%s?versionId=%s", input.CopySourceBucket, UrlEncode(input.CopySourceKey, false), input.CopySourceVersionId) + } else { + copySource = fmt.Sprintf("%s/%s", input.CopySourceBucket, UrlEncode(input.CopySourceKey, false)) + } + setHeaders(headers, HEADER_COPY_SOURCE, []string{copySource}, isObs) + + if directive := string(input.MetadataDirective); directive != "" { + setHeaders(headers, HEADER_METADATA_DIRECTIVE, []string{directive}, isObs) + } + + if input.MetadataDirective == ReplaceMetadata { + input.prepareReplaceHeaders(headers) + } + + input.prepareCopySourceHeaders(headers, isObs) + if input.SourceSseHeader != nil { + if sseCHeader, ok := input.SourceSseHeader.(SseCHeader); ok { + setHeaders(headers, HEADER_SSEC_COPY_SOURCE_ENCRYPTION, []string{sseCHeader.GetEncryption()}, isObs) + setHeaders(headers, HEADER_SSEC_COPY_SOURCE_KEY, []string{sseCHeader.GetKey()}, isObs) + setHeaders(headers, HEADER_SSEC_COPY_SOURCE_KEY_MD5, []string{sseCHeader.GetKeyMD5()}, isObs) + } + } + if input.SuccessActionRedirect != "" { + headers[HEADER_SUCCESS_ACTION_REDIRECT] = []string{input.SuccessActionRedirect} + } + return +} + +func (input AbortMultipartUploadInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{"uploadId": input.UploadId} + return +} + +func (input InitiateMultipartUploadInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params, headers, data, err = input.ObjectOperationInput.trans(isObs) + if err != nil { + return + } + if input.ContentType != "" { + headers[HEADER_CONTENT_TYPE_CAML] = []string{input.ContentType} + } + params[string(SubResourceUploads)] = "" + return +} + +func (input UploadPartInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{"uploadId": input.UploadId, "partNumber": IntToString(input.PartNumber)} + headers = make(map[string][]string) + setSseHeader(headers, input.SseHeader, true, isObs) + if input.ContentMD5 != "" { + headers[HEADER_MD5_CAMEL] = []string{input.ContentMD5} + } + if input.Body != nil { + data = input.Body + } + return +} + +func (input CompleteMultipartUploadInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{"uploadId": input.UploadId} + data, _ = ConvertCompleteMultipartUploadInputToXml(input, false) + return +} + +func (input ListPartsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{"uploadId": input.UploadId} + if input.MaxParts > 0 { + params["max-parts"] = IntToString(input.MaxParts) + } + if input.PartNumberMarker > 0 { + params["part-number-marker"] = IntToString(input.PartNumberMarker) + } + return +} + +func (input CopyPartInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = map[string]string{"uploadId": input.UploadId, "partNumber": 
IntToString(input.PartNumber)} + headers = make(map[string][]string, 1) + var copySource string + if input.CopySourceVersionId != "" { + copySource = fmt.Sprintf("%s/%s?versionId=%s", input.CopySourceBucket, UrlEncode(input.CopySourceKey, false), input.CopySourceVersionId) + } else { + copySource = fmt.Sprintf("%s/%s", input.CopySourceBucket, UrlEncode(input.CopySourceKey, false)) + } + setHeaders(headers, HEADER_COPY_SOURCE, []string{copySource}, isObs) + if input.CopySourceRangeStart >= 0 && input.CopySourceRangeEnd > input.CopySourceRangeStart { + setHeaders(headers, HEADER_COPY_SOURCE_RANGE, []string{fmt.Sprintf("bytes=%d-%d", input.CopySourceRangeStart, input.CopySourceRangeEnd)}, isObs) + } + + setSseHeader(headers, input.SseHeader, true, isObs) + if input.SourceSseHeader != nil { + if sseCHeader, ok := input.SourceSseHeader.(SseCHeader); ok { + setHeaders(headers, HEADER_SSEC_COPY_SOURCE_ENCRYPTION, []string{sseCHeader.GetEncryption()}, isObs) + setHeaders(headers, HEADER_SSEC_COPY_SOURCE_KEY, []string{sseCHeader.GetKey()}, isObs) + setHeaders(headers, HEADER_SSEC_COPY_SOURCE_KEY_MD5, []string{sseCHeader.GetKeyMD5()}, isObs) + } + + } + return +} + +func (input HeadObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + params = make(map[string]string) + if input.VersionId != "" { + params[PARAM_VERSION_ID] = input.VersionId + } + return +} + +func (input SetBucketRequestPaymentInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + return trans(SubResourceRequestPayment, input) +} + +type partSlice []Part + +func (parts partSlice) Len() int { + return len(parts) +} + +func (parts partSlice) Less(i, j int) bool { + return parts[i].PartNumber < parts[j].PartNumber +} + +func (parts partSlice) Swap(i, j int) { + parts[i], parts[j] = parts[j], parts[i] +} + +type readerWrapper struct { + reader io.Reader + mark int64 + totalCount int64 + readedCount int64 +} + +func (rw *readerWrapper) seek(offset int64, whence int) (int64, error) { + if r, ok := rw.reader.(*strings.Reader); ok { + return r.Seek(offset, whence) + } else if r, ok := rw.reader.(*bytes.Reader); ok { + return r.Seek(offset, whence) + } else if r, ok := rw.reader.(*os.File); ok { + return r.Seek(offset, whence) + } + return offset, nil +} + +func (rw *readerWrapper) Read(p []byte) (n int, err error) { + if rw.totalCount == 0 { + return 0, io.EOF + } + if rw.totalCount > 0 { + n, err = rw.reader.Read(p) + readedOnce := int64(n) + remainCount := rw.totalCount - rw.readedCount + if remainCount > readedOnce { + rw.readedCount += readedOnce + return n, err + } + rw.readedCount += remainCount + return int(remainCount), io.EOF + } + return rw.reader.Read(p) +} + +type fileReaderWrapper struct { + readerWrapper + filePath string +} + +func (input SetBucketFetchPolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + contentType, _ := mimeTypes["json"] + headers = make(map[string][]string, 2) + headers[HEADER_CONTENT_TYPE] = []string{contentType} + setHeaders(headers, headerOefMarker, []string{"yes"}, isObs) + data, err = convertFetchPolicyToJSON(input) + return +} + +func (input GetBucketFetchPolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + headers = make(map[string][]string, 1) + setHeaders(headers, headerOefMarker, []string{"yes"}, isObs) + return +} + +func (input 
DeleteBucketFetchPolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + headers = make(map[string][]string, 1) + setHeaders(headers, headerOefMarker, []string{"yes"}, isObs) + return +} + +func (input SetBucketFetchJobInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + contentType, _ := mimeTypes["json"] + headers = make(map[string][]string, 2) + headers[HEADER_CONTENT_TYPE] = []string{contentType} + setHeaders(headers, headerOefMarker, []string{"yes"}, isObs) + data, err = convertFetchJobToJSON(input) + return +} + +func (input GetBucketFetchJobInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) { + headers = make(map[string][]string, 1) + setHeaders(headers, headerOefMarker, []string{"yes"}, isObs) + return +} diff --git a/modules/obs/transfer.go b/modules/obs/transfer.go new file mode 100755 index 000000000..4dc50c0f9 --- /dev/null +++ b/modules/obs/transfer.go @@ -0,0 +1,873 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +//nolint:golint, unused +package obs + +import ( + "bufio" + "encoding/xml" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sync" + "sync/atomic" + "syscall" +) + +var errAbort = errors.New("AbortError") + +// FileStatus defines the upload file properties +type FileStatus struct { + XMLName xml.Name `xml:"FileInfo"` + LastModified int64 `xml:"LastModified"` + Size int64 `xml:"Size"` +} + +// UploadPartInfo defines the upload part properties +type UploadPartInfo struct { + XMLName xml.Name `xml:"UploadPart"` + PartNumber int `xml:"PartNumber"` + Etag string `xml:"Etag"` + PartSize int64 `xml:"PartSize"` + Offset int64 `xml:"Offset"` + IsCompleted bool `xml:"IsCompleted"` +} + +// UploadCheckpoint defines the upload checkpoint file properties +type UploadCheckpoint struct { + XMLName xml.Name `xml:"UploadFileCheckpoint"` + Bucket string `xml:"Bucket"` + Key string `xml:"Key"` + UploadId string `xml:"UploadId,omitempty"` + UploadFile string `xml:"FileUrl"` + FileInfo FileStatus `xml:"FileInfo"` + UploadParts []UploadPartInfo `xml:"UploadParts>UploadPart"` +} + +func (ufc *UploadCheckpoint) isValid(bucket, key, uploadFile string, fileStat os.FileInfo) bool { + if ufc.Bucket != bucket || ufc.Key != key || ufc.UploadFile != uploadFile { + doLog(LEVEL_INFO, "Checkpoint file is invalid, the bucketName or objectKey or uploadFile was changed. clear the record.") + return false + } + + if ufc.FileInfo.Size != fileStat.Size() || ufc.FileInfo.LastModified != fileStat.ModTime().Unix() { + doLog(LEVEL_INFO, "Checkpoint file is invalid, the uploadFile was changed. clear the record.") + return false + } + + if ufc.UploadId == "" { + doLog(LEVEL_INFO, "UploadId is invalid. 
clear the record.") + return false + } + + return true +} + +type uploadPartTask struct { + UploadPartInput + obsClient *ObsClient + abort *int32 + extensions []extensionOptions + enableCheckpoint bool +} + +func (task *uploadPartTask) Run() interface{} { + if atomic.LoadInt32(task.abort) == 1 { + return errAbort + } + + input := &UploadPartInput{} + input.Bucket = task.Bucket + input.Key = task.Key + input.PartNumber = task.PartNumber + input.UploadId = task.UploadId + input.SseHeader = task.SseHeader + input.SourceFile = task.SourceFile + input.Offset = task.Offset + input.PartSize = task.PartSize + extensions := task.extensions + + var output *UploadPartOutput + var err error + if extensions != nil { + output, err = task.obsClient.UploadPart(input, extensions...) + } else { + output, err = task.obsClient.UploadPart(input) + } + + if err == nil { + if output.ETag == "" { + doLog(LEVEL_WARN, "Get invalid etag value after uploading part [%d].", task.PartNumber) + if !task.enableCheckpoint { + atomic.CompareAndSwapInt32(task.abort, 0, 1) + doLog(LEVEL_WARN, "Task is aborted, part number is [%d]", task.PartNumber) + } + return fmt.Errorf("get invalid etag value after uploading part [%d]", task.PartNumber) + } + return output + } else if obsError, ok := err.(ObsError); ok && obsError.StatusCode >= 400 && obsError.StatusCode < 500 { + atomic.CompareAndSwapInt32(task.abort, 0, 1) + doLog(LEVEL_WARN, "Task is aborted, part number is [%d]", task.PartNumber) + } + return err +} + +func loadCheckpointFile(checkpointFile string, result interface{}) error { + ret, err := ioutil.ReadFile(checkpointFile) + if err != nil { + return err + } + if len(ret) == 0 { + return nil + } + return xml.Unmarshal(ret, result) +} + +func updateCheckpointFile(fc interface{}, checkpointFilePath string) error { + result, err := xml.Marshal(fc) + if err != nil { + return err + } + err = ioutil.WriteFile(checkpointFilePath, result, 0666) + return err +} + +func getCheckpointFile(ufc *UploadCheckpoint, uploadFileStat os.FileInfo, input *UploadFileInput, obsClient *ObsClient, extensions []extensionOptions) (needCheckpoint bool, err error) { + checkpointFilePath := input.CheckpointFile + checkpointFileStat, err := os.Stat(checkpointFilePath) + if err != nil { + doLog(LEVEL_DEBUG, fmt.Sprintf("Stat checkpoint file failed with error: [%v].", err)) + return true, nil + } + if checkpointFileStat.IsDir() { + doLog(LEVEL_ERROR, "Checkpoint file can not be a folder.") + return false, errors.New("checkpoint file can not be a folder") + } + err = loadCheckpointFile(checkpointFilePath, ufc) + if err != nil { + doLog(LEVEL_WARN, fmt.Sprintf("Load checkpoint file failed with error: [%v].", err)) + return true, nil + } else if !ufc.isValid(input.Bucket, input.Key, input.UploadFile, uploadFileStat) { + if ufc.Bucket != "" && ufc.Key != "" && ufc.UploadId != "" { + _err := abortTask(ufc.Bucket, ufc.Key, ufc.UploadId, obsClient, extensions) + if _err != nil { + doLog(LEVEL_WARN, "Failed to abort upload task [%s].", ufc.UploadId) + } + } + _err := os.Remove(checkpointFilePath) + if _err != nil { + doLog(LEVEL_WARN, fmt.Sprintf("Failed to remove checkpoint file with error: [%v].", _err)) + } + } else { + return false, nil + } + + return true, nil +} + +func prepareUpload(ufc *UploadCheckpoint, uploadFileStat os.FileInfo, input *UploadFileInput, obsClient *ObsClient, extensions []extensionOptions) error { + initiateInput := &InitiateMultipartUploadInput{} + initiateInput.ObjectOperationInput = input.ObjectOperationInput + 
initiateInput.ContentType = input.ContentType + var output *InitiateMultipartUploadOutput + var err error + if extensions != nil { + output, err = obsClient.InitiateMultipartUpload(initiateInput, extensions...) + } else { + output, err = obsClient.InitiateMultipartUpload(initiateInput) + } + if err != nil { + return err + } + + ufc.Bucket = input.Bucket + ufc.Key = input.Key + ufc.UploadFile = input.UploadFile + ufc.FileInfo = FileStatus{} + ufc.FileInfo.Size = uploadFileStat.Size() + ufc.FileInfo.LastModified = uploadFileStat.ModTime().Unix() + ufc.UploadId = output.UploadId + + err = sliceFile(input.PartSize, ufc) + return err +} + +func sliceFile(partSize int64, ufc *UploadCheckpoint) error { + fileSize := ufc.FileInfo.Size + cnt := fileSize / partSize + if cnt >= 10000 { + partSize = fileSize / 10000 + if fileSize%10000 != 0 { + partSize++ + } + cnt = fileSize / partSize + } + if fileSize%partSize != 0 { + cnt++ + } + + if partSize > MAX_PART_SIZE { + doLog(LEVEL_ERROR, "The source upload file is too large") + return fmt.Errorf("The source upload file is too large") + } + + if cnt == 0 { + uploadPart := UploadPartInfo{} + uploadPart.PartNumber = 1 + ufc.UploadParts = []UploadPartInfo{uploadPart} + } else { + uploadParts := make([]UploadPartInfo, 0, cnt) + var i int64 + for i = 0; i < cnt; i++ { + uploadPart := UploadPartInfo{} + uploadPart.PartNumber = int(i) + 1 + uploadPart.PartSize = partSize + uploadPart.Offset = i * partSize + uploadParts = append(uploadParts, uploadPart) + } + if value := fileSize % partSize; value != 0 { + uploadParts[cnt-1].PartSize = value + } + ufc.UploadParts = uploadParts + } + return nil +} + +func abortTask(bucket, key, uploadID string, obsClient *ObsClient, extensions []extensionOptions) error { + input := &AbortMultipartUploadInput{} + input.Bucket = bucket + input.Key = key + input.UploadId = uploadID + if extensions != nil { + _, err := obsClient.AbortMultipartUpload(input, extensions...) + return err + } + _, err := obsClient.AbortMultipartUpload(input) + return err +} + +func handleUploadFileResult(uploadPartError error, ufc *UploadCheckpoint, enableCheckpoint bool, obsClient *ObsClient, extensions []extensionOptions) error { + if uploadPartError != nil { + if enableCheckpoint { + return uploadPartError + } + _err := abortTask(ufc.Bucket, ufc.Key, ufc.UploadId, obsClient, extensions) + if _err != nil { + doLog(LEVEL_WARN, "Failed to abort task [%s].", ufc.UploadId) + } + return uploadPartError + } + return nil +} + +func completeParts(ufc *UploadCheckpoint, enableCheckpoint bool, checkpointFilePath string, obsClient *ObsClient, extensions []extensionOptions) (output *CompleteMultipartUploadOutput, err error) { + completeInput := &CompleteMultipartUploadInput{} + completeInput.Bucket = ufc.Bucket + completeInput.Key = ufc.Key + completeInput.UploadId = ufc.UploadId + parts := make([]Part, 0, len(ufc.UploadParts)) + for _, uploadPart := range ufc.UploadParts { + part := Part{} + part.PartNumber = uploadPart.PartNumber + part.ETag = uploadPart.Etag + parts = append(parts, part) + } + completeInput.Parts = parts + var completeOutput *CompleteMultipartUploadOutput + if extensions != nil { + completeOutput, err = obsClient.CompleteMultipartUpload(completeInput, extensions...) 
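prepareUpload, uploadPartConcurrent, and completeParts together implement a checkpointed, resumable multipart upload: the source file is sliced into parts, parts are uploaded concurrently, progress is persisted to the checkpoint file, and the upload is completed or aborted at the end. A minimal sketch of driving it, assuming an exported UploadFile wrapper over resumeUpload and the code.gitea.io/gitea module root; bucket, key, paths, and tuning values are placeholders:

package main

import (
	"fmt"

	"code.gitea.io/gitea/modules/obs"
)

func main() {
	client, err := obs.New("your-ak", "your-sk", "https://obs.example.com")
	if err != nil {
		panic(err)
	}
	input := &obs.UploadFileInput{}
	input.Bucket = "my-bucket" // Bucket/Key are promoted from the embedded ObjectOperationInput
	input.Key = "my-object"
	input.UploadFile = "/tmp/big-file" // source file; sliceFile splits it into parts
	input.PartSize = 9 * 1024 * 1024   // bytes per part; sliceFile caps the part count at 10000
	input.TaskNum = 5                  // concurrent part uploads in uploadPartConcurrent
	input.EnableCheckpoint = true      // lets getCheckpointFile resume an interrupted upload
	input.CheckpointFile = "/tmp/big-file.checkpoint"
	output, err := client.UploadFile(input) // UploadFile is the assumed exported entry point
	if err != nil {
		panic(err)
	}
	fmt.Println(output.ETag)
}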
+ } else { + completeOutput, err = obsClient.CompleteMultipartUpload(completeInput) + } + + if err == nil { + if enableCheckpoint { + _err := os.Remove(checkpointFilePath) + if _err != nil { + doLog(LEVEL_WARN, "Upload file successfully, but remove checkpoint file failed with error [%v].", _err) + } + } + return completeOutput, err + } + if !enableCheckpoint { + _err := abortTask(ufc.Bucket, ufc.Key, ufc.UploadId, obsClient, extensions) + if _err != nil { + doLog(LEVEL_WARN, "Failed to abort task [%s].", ufc.UploadId) + } + } + return completeOutput, err +} + +func (obsClient ObsClient) resumeUpload(input *UploadFileInput, extensions []extensionOptions) (output *CompleteMultipartUploadOutput, err error) { + uploadFileStat, err := os.Stat(input.UploadFile) + if err != nil { + doLog(LEVEL_ERROR, fmt.Sprintf("Failed to stat uploadFile with error: [%v].", err)) + return nil, err + } + if uploadFileStat.IsDir() { + doLog(LEVEL_ERROR, "UploadFile can not be a folder.") + return nil, errors.New("uploadFile can not be a folder") + } + + ufc := &UploadCheckpoint{} + + var needCheckpoint = true + var checkpointFilePath = input.CheckpointFile + var enableCheckpoint = input.EnableCheckpoint + if enableCheckpoint { + needCheckpoint, err = getCheckpointFile(ufc, uploadFileStat, input, &obsClient, extensions) + if err != nil { + return nil, err + } + } + if needCheckpoint { + err = prepareUpload(ufc, uploadFileStat, input, &obsClient, extensions) + if err != nil { + return nil, err + } + + if enableCheckpoint { + err = updateCheckpointFile(ufc, checkpointFilePath) + if err != nil { + doLog(LEVEL_ERROR, "Failed to update checkpoint file with error [%v].", err) + _err := abortTask(ufc.Bucket, ufc.Key, ufc.UploadId, &obsClient, extensions) + if _err != nil { + doLog(LEVEL_WARN, "Failed to abort task [%s].", ufc.UploadId) + } + return nil, err + } + } + } + + uploadPartError := obsClient.uploadPartConcurrent(ufc, checkpointFilePath, input, extensions) + err = handleUploadFileResult(uploadPartError, ufc, enableCheckpoint, &obsClient, extensions) + if err != nil { + return nil, err + } + + completeOutput, err := completeParts(ufc, enableCheckpoint, checkpointFilePath, &obsClient, extensions) + + return completeOutput, err +} + +func handleUploadTaskResult(result interface{}, ufc *UploadCheckpoint, partNum int, enableCheckpoint bool, checkpointFilePath string, lock *sync.Mutex) (err error) { + if uploadPartOutput, ok := result.(*UploadPartOutput); ok { + lock.Lock() + defer lock.Unlock() + ufc.UploadParts[partNum-1].Etag = uploadPartOutput.ETag + ufc.UploadParts[partNum-1].IsCompleted = true + if enableCheckpoint { + _err := updateCheckpointFile(ufc, checkpointFilePath) + if _err != nil { + doLog(LEVEL_WARN, "Failed to update checkpoint file with error [%v].", _err) + } + } + } else if result != errAbort { + if _err, ok := result.(error); ok { + err = _err + } + } + return +} + +func (obsClient ObsClient) uploadPartConcurrent(ufc *UploadCheckpoint, checkpointFilePath string, input *UploadFileInput, extensions []extensionOptions) error { + pool := NewRoutinePool(input.TaskNum, MAX_PART_NUM) + var uploadPartError atomic.Value + var errFlag int32 + var abort int32 + lock := new(sync.Mutex) + for _, uploadPart := range ufc.UploadParts { + if atomic.LoadInt32(&abort) == 1 { + break + } + if uploadPart.IsCompleted { + continue + } + task := uploadPartTask{ + UploadPartInput: UploadPartInput{ + Bucket: ufc.Bucket, + Key: ufc.Key, + PartNumber: uploadPart.PartNumber, + UploadId: ufc.UploadId, + SseHeader: 
input.SseHeader, + SourceFile: input.UploadFile, + Offset: uploadPart.Offset, + PartSize: uploadPart.PartSize, + }, + obsClient: &obsClient, + abort: &abort, + extensions: extensions, + enableCheckpoint: input.EnableCheckpoint, + } + pool.ExecuteFunc(func() interface{} { + result := task.Run() + err := handleUploadTaskResult(result, ufc, task.PartNumber, input.EnableCheckpoint, input.CheckpointFile, lock) + if err != nil && atomic.CompareAndSwapInt32(&errFlag, 0, 1) { + uploadPartError.Store(err) + } + return nil + }) + } + pool.ShutDown() + if err, ok := uploadPartError.Load().(error); ok { + return err + } + return nil +} + +// ObjectInfo defines download object info +type ObjectInfo struct { + XMLName xml.Name `xml:"ObjectInfo"` + LastModified int64 `xml:"LastModified"` + Size int64 `xml:"Size"` + ETag string `xml:"ETag"` +} + +// TempFileInfo defines temp download file properties +type TempFileInfo struct { + XMLName xml.Name `xml:"TempFileInfo"` + TempFileUrl string `xml:"TempFileUrl"` + Size int64 `xml:"Size"` +} + +// DownloadPartInfo defines download part properties +type DownloadPartInfo struct { + XMLName xml.Name `xml:"DownloadPart"` + PartNumber int64 `xml:"PartNumber"` + RangeEnd int64 `xml:"RangeEnd"` + Offset int64 `xml:"Offset"` + IsCompleted bool `xml:"IsCompleted"` +} + +// DownloadCheckpoint defines download checkpoint file properties +type DownloadCheckpoint struct { + XMLName xml.Name `xml:"DownloadFileCheckpoint"` + Bucket string `xml:"Bucket"` + Key string `xml:"Key"` + VersionId string `xml:"VersionId,omitempty"` + DownloadFile string `xml:"FileUrl"` + ObjectInfo ObjectInfo `xml:"ObjectInfo"` + TempFileInfo TempFileInfo `xml:"TempFileInfo"` + DownloadParts []DownloadPartInfo `xml:"DownloadParts>DownloadPart"` +} + +func (dfc *DownloadCheckpoint) isValid(input *DownloadFileInput, output *GetObjectMetadataOutput) bool { + if dfc.Bucket != input.Bucket || dfc.Key != input.Key || dfc.VersionId != input.VersionId || dfc.DownloadFile != input.DownloadFile { + doLog(LEVEL_INFO, "Checkpoint file is invalid, the bucketName or objectKey or downloadFile was changed. clear the record.") + return false + } + if dfc.ObjectInfo.LastModified != output.LastModified.Unix() || dfc.ObjectInfo.ETag != output.ETag || dfc.ObjectInfo.Size != output.ContentLength { + doLog(LEVEL_INFO, "Checkpoint file is invalid, the object info was changed. clear the record.") + return false + } + if dfc.TempFileInfo.Size != output.ContentLength { + doLog(LEVEL_INFO, "Checkpoint file is invalid, size was changed. clear the record.") + return false + } + stat, err := os.Stat(dfc.TempFileInfo.TempFileUrl) + if err != nil || stat.Size() != dfc.ObjectInfo.Size { + doLog(LEVEL_INFO, "Checkpoint file is invalid, the temp download file was changed. 
clear the record.") + return false + } + + return true +} + +type downloadPartTask struct { + GetObjectInput + obsClient *ObsClient + extensions []extensionOptions + abort *int32 + partNumber int64 + tempFileURL string + enableCheckpoint bool +} + +func (task *downloadPartTask) Run() interface{} { + if atomic.LoadInt32(task.abort) == 1 { + return errAbort + } + getObjectInput := &GetObjectInput{} + getObjectInput.GetObjectMetadataInput = task.GetObjectMetadataInput + getObjectInput.IfMatch = task.IfMatch + getObjectInput.IfNoneMatch = task.IfNoneMatch + getObjectInput.IfModifiedSince = task.IfModifiedSince + getObjectInput.IfUnmodifiedSince = task.IfUnmodifiedSince + getObjectInput.RangeStart = task.RangeStart + getObjectInput.RangeEnd = task.RangeEnd + + var output *GetObjectOutput + var err error + if task.extensions != nil { + output, err = task.obsClient.GetObject(getObjectInput, task.extensions...) + } else { + output, err = task.obsClient.GetObject(getObjectInput) + } + + if err == nil { + defer func() { + errMsg := output.Body.Close() + if errMsg != nil { + doLog(LEVEL_WARN, "Failed to close response body.") + } + }() + _err := updateDownloadFile(task.tempFileURL, task.RangeStart, output) + if _err != nil { + if !task.enableCheckpoint { + atomic.CompareAndSwapInt32(task.abort, 0, 1) + doLog(LEVEL_WARN, "Task is aborted, part number is [%d]", task.partNumber) + } + return _err + } + return output + } else if obsError, ok := err.(ObsError); ok && obsError.StatusCode >= 400 && obsError.StatusCode < 500 { + atomic.CompareAndSwapInt32(task.abort, 0, 1) + doLog(LEVEL_WARN, "Task is aborted, part number is [%d]", task.partNumber) + } + return err +} + +func getObjectInfo(input *DownloadFileInput, obsClient *ObsClient, extensions []extensionOptions) (getObjectmetaOutput *GetObjectMetadataOutput, err error) { + if extensions != nil { + getObjectmetaOutput, err = obsClient.GetObjectMetadata(&input.GetObjectMetadataInput, extensions...) 
+ } else { + getObjectmetaOutput, err = obsClient.GetObjectMetadata(&input.GetObjectMetadataInput) + } + + return +} + +func getDownloadCheckpointFile(dfc *DownloadCheckpoint, input *DownloadFileInput, output *GetObjectMetadataOutput) (needCheckpoint bool, err error) { + checkpointFilePath := input.CheckpointFile + checkpointFileStat, err := os.Stat(checkpointFilePath) + if err != nil { + doLog(LEVEL_DEBUG, fmt.Sprintf("Stat checkpoint file failed with error: [%v].", err)) + return true, nil + } + if checkpointFileStat.IsDir() { + doLog(LEVEL_ERROR, "Checkpoint file can not be a folder.") + return false, errors.New("checkpoint file can not be a folder") + } + err = loadCheckpointFile(checkpointFilePath, dfc) + if err != nil { + doLog(LEVEL_WARN, fmt.Sprintf("Load checkpoint file failed with error: [%v].", err)) + return true, nil + } else if !dfc.isValid(input, output) { + if dfc.TempFileInfo.TempFileUrl != "" { + _err := os.Remove(dfc.TempFileInfo.TempFileUrl) + if _err != nil { + doLog(LEVEL_WARN, "Failed to remove temp download file with error [%v].", _err) + } + } + _err := os.Remove(checkpointFilePath) + if _err != nil { + doLog(LEVEL_WARN, "Failed to remove checkpoint file with error [%v].", _err) + } + } else { + return false, nil + } + + return true, nil +} + +func sliceObject(objectSize, partSize int64, dfc *DownloadCheckpoint) { + cnt := objectSize / partSize + if objectSize%partSize > 0 { + cnt++ + } + + if cnt == 0 { + downloadPart := DownloadPartInfo{} + downloadPart.PartNumber = 1 + dfc.DownloadParts = []DownloadPartInfo{downloadPart} + } else { + downloadParts := make([]DownloadPartInfo, 0, cnt) + var i int64 + for i = 0; i < cnt; i++ { + downloadPart := DownloadPartInfo{} + downloadPart.PartNumber = i + 1 + downloadPart.Offset = i * partSize + downloadPart.RangeEnd = (i+1)*partSize - 1 + downloadParts = append(downloadParts, downloadPart) + } + dfc.DownloadParts = downloadParts + if value := objectSize % partSize; value > 0 { + dfc.DownloadParts[cnt-1].RangeEnd = dfc.ObjectInfo.Size - 1 + } + } +} + +func createFile(tempFileURL string, fileSize int64) error { + fd, err := syscall.Open(tempFileURL, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + doLog(LEVEL_WARN, "Failed to open temp download file [%s].", tempFileURL) + return err + } + defer func() { + errMsg := syscall.Close(fd) + if errMsg != nil { + doLog(LEVEL_WARN, "Failed to close file with error [%v].", errMsg) + } + }() + err = syscall.Ftruncate(fd, fileSize) + if err != nil { + doLog(LEVEL_WARN, "Failed to create file with error [%v].", err) + } + return err +} + +func prepareTempFile(tempFileURL string, fileSize int64) error { + parentDir := filepath.Dir(tempFileURL) + stat, err := os.Stat(parentDir) + if err != nil { + doLog(LEVEL_DEBUG, "Failed to stat path with error [%v].", err) + _err := os.MkdirAll(parentDir, os.ModePerm) + if _err != nil { + doLog(LEVEL_ERROR, "Failed to make dir with error [%v].", _err) + return _err + } + } else if !stat.IsDir() { + doLog(LEVEL_ERROR, "Cannot create folder [%s] due to a same file exists.", parentDir) + return fmt.Errorf("cannot create folder [%s] due to a same file exists", parentDir) + } + + err = createFile(tempFileURL, fileSize) + if err == nil { + return nil + } + fd, err := os.OpenFile(tempFileURL, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + doLog(LEVEL_ERROR, "Failed to open temp download file [%s].", tempFileURL) + return err + } + defer func() { + errMsg := fd.Close() + if errMsg != nil { + doLog(LEVEL_WARN, "Failed to close file 
with error [%v].", errMsg) + } + }() + if fileSize > 0 { + _, err = fd.WriteAt([]byte("a"), fileSize-1) + if err != nil { + doLog(LEVEL_ERROR, "Failed to create temp download file with error [%v].", err) + return err + } + } + + return nil +} + +func handleDownloadFileResult(tempFileURL string, enableCheckpoint bool, downloadFileError error) error { + if downloadFileError != nil { + if !enableCheckpoint { + _err := os.Remove(tempFileURL) + if _err != nil { + doLog(LEVEL_WARN, "Failed to remove temp download file with error [%v].", _err) + } + } + return downloadFileError + } + return nil +} + +func (obsClient ObsClient) resumeDownload(input *DownloadFileInput, extensions []extensionOptions) (output *GetObjectMetadataOutput, err error) { + getObjectmetaOutput, err := getObjectInfo(input, &obsClient, extensions) + if err != nil { + return nil, err + } + + objectSize := getObjectmetaOutput.ContentLength + partSize := input.PartSize + dfc := &DownloadCheckpoint{} + + var needCheckpoint = true + var checkpointFilePath = input.CheckpointFile + var enableCheckpoint = input.EnableCheckpoint + if enableCheckpoint { + needCheckpoint, err = getDownloadCheckpointFile(dfc, input, getObjectmetaOutput) + if err != nil { + return nil, err + } + } + + if needCheckpoint { + dfc.Bucket = input.Bucket + dfc.Key = input.Key + dfc.VersionId = input.VersionId + dfc.DownloadFile = input.DownloadFile + dfc.ObjectInfo = ObjectInfo{} + dfc.ObjectInfo.LastModified = getObjectmetaOutput.LastModified.Unix() + dfc.ObjectInfo.Size = getObjectmetaOutput.ContentLength + dfc.ObjectInfo.ETag = getObjectmetaOutput.ETag + dfc.TempFileInfo = TempFileInfo{} + dfc.TempFileInfo.TempFileUrl = input.DownloadFile + ".tmp" + dfc.TempFileInfo.Size = getObjectmetaOutput.ContentLength + + sliceObject(objectSize, partSize, dfc) + _err := prepareTempFile(dfc.TempFileInfo.TempFileUrl, dfc.TempFileInfo.Size) + if _err != nil { + return nil, _err + } + + if enableCheckpoint { + _err := updateCheckpointFile(dfc, checkpointFilePath) + if _err != nil { + doLog(LEVEL_ERROR, "Failed to update checkpoint file with error [%v].", _err) + _errMsg := os.Remove(dfc.TempFileInfo.TempFileUrl) + if _errMsg != nil { + doLog(LEVEL_WARN, "Failed to remove temp download file with error [%v].", _errMsg) + } + return nil, _err + } + } + } + + downloadFileError := obsClient.downloadFileConcurrent(input, dfc, extensions) + err = handleDownloadFileResult(dfc.TempFileInfo.TempFileUrl, enableCheckpoint, downloadFileError) + if err != nil { + return nil, err + } + + err = os.Rename(dfc.TempFileInfo.TempFileUrl, input.DownloadFile) + if err != nil { + doLog(LEVEL_ERROR, "Failed to rename temp download file [%s] to download file [%s] with error [%v].", dfc.TempFileInfo.TempFileUrl, input.DownloadFile, err) + return nil, err + } + if enableCheckpoint { + err = os.Remove(checkpointFilePath) + if err != nil { + doLog(LEVEL_WARN, "Download file successfully, but remove checkpoint file failed with error [%v].", err) + } + } + + return getObjectmetaOutput, nil +} + +func updateDownloadFile(filePath string, rangeStart int64, output *GetObjectOutput) error { + fd, err := os.OpenFile(filePath, os.O_WRONLY, 0666) + if err != nil { + doLog(LEVEL_ERROR, "Failed to open file [%s].", filePath) + return err + } + defer func() { + errMsg := fd.Close() + if errMsg != nil { + doLog(LEVEL_WARN, "Failed to close file with error [%v].", errMsg) + } + }() + _, err = fd.Seek(rangeStart, 0) + if err != nil { + doLog(LEVEL_ERROR, "Failed to seek file with error [%v].", err) + return err + } + 
fileWriter := bufio.NewWriterSize(fd, 65536) + part := make([]byte, 8192) + var readErr error + var readCount int + for { + readCount, readErr = output.Body.Read(part) + if readCount > 0 { + wcnt, werr := fileWriter.Write(part[0:readCount]) + if werr != nil { + doLog(LEVEL_ERROR, "Failed to write to file with error [%v].", werr) + return werr + } + if wcnt != readCount { + doLog(LEVEL_ERROR, "Failed to write to file [%s], expect: [%d], actual: [%d]", filePath, readCount, wcnt) + return fmt.Errorf("Failed to write to file [%s], expect: [%d], actual: [%d]", filePath, readCount, wcnt) + } + } + if readErr != nil { + if readErr != io.EOF { + doLog(LEVEL_ERROR, "Failed to read response body with error [%v].", readErr) + return readErr + } + break + } + } + err = fileWriter.Flush() + if err != nil { + doLog(LEVEL_ERROR, "Failed to flush file with error [%v].", err) + return err + } + return nil +} + +func handleDownloadTaskResult(result interface{}, dfc *DownloadCheckpoint, partNum int64, enableCheckpoint bool, checkpointFile string, lock *sync.Mutex) (err error) { + if _, ok := result.(*GetObjectOutput); ok { + lock.Lock() + defer lock.Unlock() + dfc.DownloadParts[partNum-1].IsCompleted = true + if enableCheckpoint { + _err := updateCheckpointFile(dfc, checkpointFile) + if _err != nil { + doLog(LEVEL_WARN, "Failed to update checkpoint file with error [%v].", _err) + } + } + } else if result != errAbort { + if _err, ok := result.(error); ok { + err = _err + } + } + return +} + +func (obsClient ObsClient) downloadFileConcurrent(input *DownloadFileInput, dfc *DownloadCheckpoint, extensions []extensionOptions) error { + pool := NewRoutinePool(input.TaskNum, MAX_PART_NUM) + var downloadPartError atomic.Value + var errFlag int32 + var abort int32 + lock := new(sync.Mutex) + for _, downloadPart := range dfc.DownloadParts { + if atomic.LoadInt32(&abort) == 1 { + break + } + if downloadPart.IsCompleted { + continue + } + task := downloadPartTask{ + GetObjectInput: GetObjectInput{ + GetObjectMetadataInput: input.GetObjectMetadataInput, + IfMatch: input.IfMatch, + IfNoneMatch: input.IfNoneMatch, + IfUnmodifiedSince: input.IfUnmodifiedSince, + IfModifiedSince: input.IfModifiedSince, + RangeStart: downloadPart.Offset, + RangeEnd: downloadPart.RangeEnd, + }, + obsClient: &obsClient, + extensions: extensions, + abort: &abort, + partNumber: downloadPart.PartNumber, + tempFileURL: dfc.TempFileInfo.TempFileUrl, + enableCheckpoint: input.EnableCheckpoint, + } + pool.ExecuteFunc(func() interface{} { + result := task.Run() + err := handleDownloadTaskResult(result, dfc, task.partNumber, input.EnableCheckpoint, input.CheckpointFile, lock) + if err != nil && atomic.CompareAndSwapInt32(&errFlag, 0, 1) { + downloadPartError.Store(err) + } + return nil + }) + } + pool.ShutDown() + if err, ok := downloadPartError.Load().(error); ok { + return err + } + + return nil +} diff --git a/modules/obs/util.go b/modules/obs/util.go new file mode 100755 index 000000000..f3378dff9 --- /dev/null +++ b/modules/obs/util.go @@ -0,0 +1,536 @@ +// Copyright 2019 Huawei Technologies Co.,Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use +// this file except in compliance with the License. You may obtain a copy of the +// License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. 
See the License for the
+// specific language governing permissions and limitations under the License.
+
+//nolint:golint, unused
+package obs
+
+import (
+ "crypto/hmac"
+ "crypto/md5"
+ "crypto/sha1"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/json"
+ "encoding/xml"
+ "fmt"
+ "net/url"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var regex = regexp.MustCompile("^[\u4e00-\u9fa5]$")
+var ipRegex = regexp.MustCompile("^((2[0-4]\\d|25[0-5]|[01]?\\d\\d?)\\.){3}(2[0-4]\\d|25[0-5]|[01]?\\d\\d?)$")
+var v4AuthRegex = regexp.MustCompile("Credential=(.+?),SignedHeaders=(.+?),Signature=.+")
+var regionRegex = regexp.MustCompile(".+/\\d+/(.+?)/.+")
+
+// StringContains replaces subStr in src with subTranscoding and returns the new string
+func StringContains(src string, subStr string, subTranscoding string) string {
+ return strings.Replace(src, subStr, subTranscoding, -1)
+}
+
+// XmlTranscoding replaces special characters with their escaped form
+func XmlTranscoding(src string) string {
+ srcTmp := StringContains(src, "&", "&amp;")
+ srcTmp = StringContains(srcTmp, "<", "&lt;")
+ srcTmp = StringContains(srcTmp, ">", "&gt;")
+ srcTmp = StringContains(srcTmp, "'", "&apos;")
+ srcTmp = StringContains(srcTmp, "\"", "&quot;")
+ return srcTmp
+}
+
+// StringToInt converts string value to int value with default value
+func StringToInt(value string, def int) int {
+ ret, err := strconv.Atoi(value)
+ if err != nil {
+ ret = def
+ }
+ return ret
+}
+
+// StringToInt64 converts string value to int64 value with default value
+func StringToInt64(value string, def int64) int64 {
+ ret, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ ret = def
+ }
+ return ret
+}
+
+// IntToString converts int value to string value
+func IntToString(value int) string {
+ return strconv.Itoa(value)
+}
+
+// Int64ToString converts int64 value to string value
+func Int64ToString(value int64) string {
+ return strconv.FormatInt(value, 10)
+}
+
+// GetCurrentTimestamp gets unix time in milliseconds
+func GetCurrentTimestamp() int64 {
+ return time.Now().UnixNano() / 1000000
+}
+
+// FormatUtcNow gets a textual representation of the UTC format time value
+func FormatUtcNow(format string) string {
+ return time.Now().UTC().Format(format)
+}
+
+// FormatUtcToRfc1123 gets a textual representation of the RFC1123 format time value
+func FormatUtcToRfc1123(t time.Time) string {
+ ret := t.UTC().Format(time.RFC1123)
+ return ret[:strings.LastIndex(ret, "UTC")] + "GMT"
+}
+
+// Md5 gets the md5 value of input
+func Md5(value []byte) []byte {
+ m := md5.New()
+ _, err := m.Write(value)
+ if err != nil {
+ doLog(LEVEL_WARN, "MD5 failed to write")
+ }
+ return m.Sum(nil)
+}
+
+// HmacSha1 gets hmac sha1 value of input
+func HmacSha1(key, value []byte) []byte {
+ mac := hmac.New(sha1.New, key)
+ _, err := mac.Write(value)
+ if err != nil {
+ doLog(LEVEL_WARN, "HmacSha1 failed to write")
+ }
+ return mac.Sum(nil)
+}
+
+// HmacSha256 gets the hmac sha256 value of input
+func HmacSha256(key, value []byte) []byte {
+ mac := hmac.New(sha256.New, key)
+ _, err := mac.Write(value)
+ if err != nil {
+ doLog(LEVEL_WARN, "HmacSha256 failed to write")
+ }
+ return mac.Sum(nil)
+}
+
+// Base64Encode wrapper of base64.StdEncoding.EncodeToString
+func Base64Encode(value []byte) string {
+ return base64.StdEncoding.EncodeToString(value)
+}
+
+// Base64Decode wrapper of base64.StdEncoding.DecodeString
+func Base64Decode(value string) ([]byte, error) {
+ return base64.StdEncoding.DecodeString(value)
+}
+
+// HexMd5 returns the md5 value of input in
hexadecimal format +func HexMd5(value []byte) string { + return Hex(Md5(value)) +} + +// Base64Md5 returns the md5 value of input with Base64Encode +func Base64Md5(value []byte) string { + return Base64Encode(Md5(value)) +} + +// Sha256Hash returns sha256 checksum +func Sha256Hash(value []byte) []byte { + hash := sha256.New() + _, err := hash.Write(value) + if err != nil { + doLog(LEVEL_WARN, "Sha256Hash failed to write") + } + return hash.Sum(nil) +} + +// ParseXml wrapper of xml.Unmarshal +func ParseXml(value []byte, result interface{}) error { + if len(value) == 0 { + return nil + } + return xml.Unmarshal(value, result) +} + +// parseJSON wrapper of json.Unmarshal +func parseJSON(value []byte, result interface{}) error { + if len(value) == 0 { + return nil + } + return json.Unmarshal(value, result) +} + +// TransToXml wrapper of xml.Marshal +func TransToXml(value interface{}) ([]byte, error) { + if value == nil { + return []byte{}, nil + } + return xml.Marshal(value) +} + +// Hex wrapper of hex.EncodeToString +func Hex(value []byte) string { + return hex.EncodeToString(value) +} + +// HexSha256 returns the Sha256Hash value of input in hexadecimal format +func HexSha256(value []byte) string { + return Hex(Sha256Hash(value)) +} + +// UrlDecode wrapper of url.QueryUnescape +func UrlDecode(value string) (string, error) { + ret, err := url.QueryUnescape(value) + if err == nil { + return ret, nil + } + return "", err +} + +// UrlDecodeWithoutError wrapper of UrlDecode +func UrlDecodeWithoutError(value string) string { + ret, err := UrlDecode(value) + if err == nil { + return ret + } + if isErrorLogEnabled() { + doLog(LEVEL_ERROR, "Url decode error") + } + return "" +} + +// IsIP checks whether the value matches ip address +func IsIP(value string) bool { + return ipRegex.MatchString(value) +} + +// UrlEncode encodes the input value +func UrlEncode(value string, chineseOnly bool) string { + if chineseOnly { + values := make([]string, 0, len(value)) + for _, val := range value { + _value := string(val) + if regex.MatchString(_value) { + _value = url.QueryEscape(_value) + } + values = append(values, _value) + } + return strings.Join(values, "") + } + return url.QueryEscape(value) +} + +func copyHeaders(m map[string][]string) (ret map[string][]string) { + if m != nil { + ret = make(map[string][]string, len(m)) + for key, values := range m { + _values := make([]string, 0, len(values)) + for _, value := range values { + _values = append(_values, value) + } + ret[strings.ToLower(key)] = _values + } + } else { + ret = make(map[string][]string) + } + + return +} + +func parseHeaders(headers map[string][]string) (signature string, region string, signedHeaders string) { + signature = "v2" + if receviedAuthorization, ok := headers[strings.ToLower(HEADER_AUTH_CAMEL)]; ok && len(receviedAuthorization) > 0 { + if strings.HasPrefix(receviedAuthorization[0], V4_HASH_PREFIX) { + signature = "v4" + matches := v4AuthRegex.FindStringSubmatch(receviedAuthorization[0]) + if len(matches) >= 3 { + region = matches[1] + regions := regionRegex.FindStringSubmatch(region) + if len(regions) >= 2 { + region = regions[1] + } + signedHeaders = matches[2] + } + + } else if strings.HasPrefix(receviedAuthorization[0], V2_HASH_PREFIX) { + signature = "v2" + } + } + return +} + +func getTemporaryKeys() []string { + return []string{ + "Signature", + "signature", + "X-Amz-Signature", + "x-amz-signature", + } +} + +func getIsObs(isTemporary bool, querys []string, headers map[string][]string) bool { + isObs := true + if isTemporary { 
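+ // For presigned (temporary) requests the signing information travels in the
+ // query string, so probe the query keys for the amz prefix and access-key
+ // parameter; otherwise probe the request headers.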
+ for _, value := range querys { + keyPrefix := strings.ToLower(value) + if strings.HasPrefix(keyPrefix, HEADER_PREFIX) { + isObs = false + } else if strings.HasPrefix(value, HEADER_ACCESSS_KEY_AMZ) { + isObs = false + } + } + } else { + for key := range headers { + keyPrefix := strings.ToLower(key) + if strings.HasPrefix(keyPrefix, HEADER_PREFIX) { + isObs = false + break + } + } + } + return isObs +} + +func isPathStyle(headers map[string][]string, bucketName string) bool { + if receviedHost, ok := headers[HEADER_HOST]; ok && len(receviedHost) > 0 && !strings.HasPrefix(receviedHost[0], bucketName+".") { + return true + } + return false +} + +// GetV2Authorization v2 Authorization +func GetV2Authorization(ak, sk, method, bucketName, objectKey, queryURL string, headers map[string][]string) (ret map[string]string) { + + if strings.HasPrefix(queryURL, "?") { + queryURL = queryURL[1:] + } + + method = strings.ToUpper(method) + + querys := strings.Split(queryURL, "&") + querysResult := make([]string, 0) + for _, value := range querys { + if value != "=" && len(value) != 0 { + querysResult = append(querysResult, value) + } + } + params := make(map[string]string) + + for _, value := range querysResult { + kv := strings.Split(value, "=") + length := len(kv) + if length == 1 { + key := UrlDecodeWithoutError(kv[0]) + params[key] = "" + } else if length >= 2 { + key := UrlDecodeWithoutError(kv[0]) + vals := make([]string, 0, length-1) + for i := 1; i < length; i++ { + val := UrlDecodeWithoutError(kv[i]) + vals = append(vals, val) + } + params[key] = strings.Join(vals, "=") + } + } + headers = copyHeaders(headers) + pathStyle := isPathStyle(headers, bucketName) + conf := &config{securityProvider: &securityProvider{ak: ak, sk: sk}, + urlHolder: &urlHolder{scheme: "https", host: "dummy", port: 443}, + pathStyle: pathStyle} + conf.signature = SignatureObs + _, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false) + ret = v2Auth(ak, sk, method, canonicalizedURL, headers, true) + v2HashPrefix := OBS_HASH_PREFIX + ret[HEADER_AUTH_CAMEL] = fmt.Sprintf("%s %s:%s", v2HashPrefix, ak, ret["Signature"]) + return +} + +// GetAuthorization Authorization +func GetAuthorization(ak, sk, method, bucketName, objectKey, queryURL string, headers map[string][]string) (ret map[string]string) { + + if strings.HasPrefix(queryURL, "?") { + queryURL = queryURL[1:] + } + + method = strings.ToUpper(method) + + querys := strings.Split(queryURL, "&") + querysResult := make([]string, 0) + for _, value := range querys { + if value != "=" && len(value) != 0 { + querysResult = append(querysResult, value) + } + } + params := make(map[string]string) + + for _, value := range querysResult { + kv := strings.Split(value, "=") + length := len(kv) + if length == 1 { + key := UrlDecodeWithoutError(kv[0]) + params[key] = "" + } else if length >= 2 { + key := UrlDecodeWithoutError(kv[0]) + vals := make([]string, 0, length-1) + for i := 1; i < length; i++ { + val := UrlDecodeWithoutError(kv[i]) + vals = append(vals, val) + } + params[key] = strings.Join(vals, "=") + } + } + isTemporary := false + signature := "v2" + temporaryKeys := getTemporaryKeys() + for _, key := range temporaryKeys { + if _, ok := params[key]; ok { + isTemporary = true + if strings.ToLower(key) == "signature" { + signature = "v2" + } else if strings.ToLower(key) == "x-amz-signature" { + signature = "v4" + } + break + } + } + isObs := getIsObs(isTemporary, querysResult, headers) + headers = copyHeaders(headers) + pathStyle := false + if receviedHost, ok 
:= headers[HEADER_HOST]; ok && len(receviedHost) > 0 && !strings.HasPrefix(receviedHost[0], bucketName+".") { + pathStyle = true + } + conf := &config{securityProvider: &securityProvider{ak: ak, sk: sk}, + urlHolder: &urlHolder{scheme: "https", host: "dummy", port: 443}, + pathStyle: pathStyle} + + if isTemporary { + return getTemporaryAuthorization(ak, sk, method, bucketName, objectKey, signature, conf, params, headers, isObs) + } + signature, region, signedHeaders := parseHeaders(headers) + if signature == "v4" { + conf.signature = SignatureV4 + requestURL, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false) + parsedRequestURL, _err := url.Parse(requestURL) + if _err != nil { + doLog(LEVEL_WARN, "Failed to parse requestURL") + return nil + } + headerKeys := strings.Split(signedHeaders, ";") + _headers := make(map[string][]string, len(headerKeys)) + for _, headerKey := range headerKeys { + _headers[headerKey] = headers[headerKey] + } + ret = v4Auth(ak, sk, region, method, canonicalizedURL, parsedRequestURL.RawQuery, _headers) + ret[HEADER_AUTH_CAMEL] = fmt.Sprintf("%s Credential=%s,SignedHeaders=%s,Signature=%s", V4_HASH_PREFIX, ret["Credential"], ret["SignedHeaders"], ret["Signature"]) + } else if signature == "v2" { + if isObs { + conf.signature = SignatureObs + } else { + conf.signature = SignatureV2 + } + _, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false) + ret = v2Auth(ak, sk, method, canonicalizedURL, headers, isObs) + v2HashPrefix := V2_HASH_PREFIX + if isObs { + v2HashPrefix = OBS_HASH_PREFIX + } + ret[HEADER_AUTH_CAMEL] = fmt.Sprintf("%s %s:%s", v2HashPrefix, ak, ret["Signature"]) + } + return + +} + +func getTemporaryAuthorization(ak, sk, method, bucketName, objectKey, signature string, conf *config, params map[string]string, + headers map[string][]string, isObs bool) (ret map[string]string) { + + if signature == "v4" { + conf.signature = SignatureV4 + + longDate, ok := params[PARAM_DATE_AMZ_CAMEL] + if !ok { + longDate = params[HEADER_DATE_AMZ] + } + shortDate := longDate[:8] + + credential, ok := params[PARAM_CREDENTIAL_AMZ_CAMEL] + if !ok { + credential = params[strings.ToLower(PARAM_CREDENTIAL_AMZ_CAMEL)] + } + + _credential := UrlDecodeWithoutError(credential) + + regions := regionRegex.FindStringSubmatch(_credential) + var region string + if len(regions) >= 2 { + region = regions[1] + } + + _, scope := getCredential(ak, region, shortDate) + + expires, ok := params[PARAM_EXPIRES_AMZ_CAMEL] + if !ok { + expires = params[strings.ToLower(PARAM_EXPIRES_AMZ_CAMEL)] + } + + signedHeaders, ok := params[PARAM_SIGNEDHEADERS_AMZ_CAMEL] + if !ok { + signedHeaders = params[strings.ToLower(PARAM_SIGNEDHEADERS_AMZ_CAMEL)] + } + + algorithm, ok := params[PARAM_ALGORITHM_AMZ_CAMEL] + if !ok { + algorithm = params[strings.ToLower(PARAM_ALGORITHM_AMZ_CAMEL)] + } + + if _, ok := params[PARAM_SIGNATURE_AMZ_CAMEL]; ok { + delete(params, PARAM_SIGNATURE_AMZ_CAMEL) + } else if _, ok := params[strings.ToLower(PARAM_SIGNATURE_AMZ_CAMEL)]; ok { + delete(params, strings.ToLower(PARAM_SIGNATURE_AMZ_CAMEL)) + } + + ret = make(map[string]string, 6) + ret[PARAM_ALGORITHM_AMZ_CAMEL] = algorithm + ret[PARAM_CREDENTIAL_AMZ_CAMEL] = credential + ret[PARAM_DATE_AMZ_CAMEL] = longDate + ret[PARAM_EXPIRES_AMZ_CAMEL] = expires + ret[PARAM_SIGNEDHEADERS_AMZ_CAMEL] = signedHeaders + + requestURL, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false) + parsedRequestURL, _err := url.Parse(requestURL) + if _err != nil { + doLog(LEVEL_WARN, "Failed to 
parse requestUrl") + return nil + } + stringToSign := getV4StringToSign(method, canonicalizedURL, parsedRequestURL.RawQuery, scope, longDate, UNSIGNED_PAYLOAD, strings.Split(signedHeaders, ";"), headers) + ret[PARAM_SIGNATURE_AMZ_CAMEL] = UrlEncode(getSignature(stringToSign, sk, region, shortDate), false) + } else if signature == "v2" { + if isObs { + conf.signature = SignatureObs + } else { + conf.signature = SignatureV2 + } + _, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false) + expires, ok := params["Expires"] + if !ok { + expires = params["expires"] + } + headers[HEADER_DATE_CAMEL] = []string{expires} + stringToSign := getV2StringToSign(method, canonicalizedURL, headers, isObs) + ret = make(map[string]string, 3) + ret["Signature"] = UrlEncode(Base64Encode(HmacSha1([]byte(sk), []byte(stringToSign))), false) + ret["AWSAccessKeyId"] = UrlEncode(ak, false) + ret["Expires"] = UrlEncode(expires, false) + } + + return +} From 7e50c92fa7d182c266769d800724f107242b574e Mon Sep 17 00:00:00 2001 From: yuyuanshifu <747342561@qq.com> Date: Fri, 29 Jan 2021 21:24:46 +0800 Subject: [PATCH 02/36] obs upload --- models/attachment.go | 9 ++- models/file_chunk.go | 16 ++-- modules/setting/setting.go | 17 +++++ modules/storage/obs.go | 119 ++++++++++++++++++++++++++++++ modules/storage/storage.go | 9 +++ routers/repo/attachment.go | 125 +++++++++++++++++++++++++++----- routers/routes/routes.go | 1 + web_src/js/components/MinioUploader.vue | 4 + 8 files changed, 274 insertions(+), 26 deletions(-) create mode 100755 modules/storage/obs.go mode change 100644 => 100755 modules/storage/storage.go diff --git a/models/attachment.go b/models/attachment.go index f08063b83..79aa317f2 100755 --- a/models/attachment.go +++ b/models/attachment.go @@ -41,6 +41,7 @@ type Attachment struct { Size int64 `xorm:"DEFAULT 0"` IsPrivate bool `xorm:"DEFAULT false"` DecompressState int32 `xorm:"DEFAULT 0"` + Type int `xorm:"DEFAULT 0"` CreatedUnix timeutil.TimeStamp `xorm:"created"` } @@ -350,7 +351,7 @@ func GetUnDecompressAttachments() ([]*Attachment, error) { func getUnDecompressAttachments(e Engine) ([]*Attachment, error) { attachments := make([]*Attachment, 0, 10) - return attachments, e.Where("decompress_state = ? and dataset_id != 0 and name like '%.zip'", DecompressStateInit).Find(&attachments) + return attachments, e.Where("decompress_state = ? and dataset_id != 0 and type = ? and name like '%.zip'", DecompressStateInit, TypeCloudBrainOne).Find(&attachments) } func GetAllPublicAttachments() ([]*AttachmentUsername, error) { @@ -360,7 +361,7 @@ func GetAllPublicAttachments() ([]*AttachmentUsername, error) { func getAllPublicAttachments(e Engine) ([]*AttachmentUsername, error) { attachments := make([]*AttachmentUsername, 0, 10) if err := e.Table("attachment").Join("LEFT", "`user`", "attachment.uploader_id "+ - "= `user`.id").Where("decompress_state= ? and is_private= ?", DecompressStateDone, false).Find(&attachments); err != nil { + "= `user`.id").Where("decompress_state= ? and is_private= ? and type = ?", DecompressStateDone, false, TypeCloudBrainOne).Find(&attachments); err != nil { return nil, err } return attachments, nil @@ -378,7 +379,7 @@ func GetPrivateAttachments(username string) ([]*AttachmentUsername, error) { func getPrivateAttachments(e Engine, userID int64) ([]*AttachmentUsername, error) { attachments := make([]*AttachmentUsername, 0, 10) if err := e.Table("attachment").Join("LEFT", "`user`", "attachment.uploader_id "+ - "= `user`.id").Where("decompress_state= ? 
and uploader_id= ?", DecompressStateDone, userID).Find(&attachments); err != nil { + "= `user`.id").Where("decompress_state= ? and uploader_id= ? and type = ?", DecompressStateDone, userID, TypeCloudBrainOne).Find(&attachments); err != nil { return nil, err } return attachments, nil @@ -406,7 +407,7 @@ func GetAllUserAttachments(userID int64) ([]*AttachmentUsername, error) { func getAllUserAttachments(e Engine, userID int64) ([]*AttachmentUsername, error) { attachments := make([]*AttachmentUsername, 0, 10) if err := e.Table("attachment").Join("LEFT", "`user`", "attachment.uploader_id "+ - "= `user`.id").Where("decompress_state= ? and (uploader_id= ? or is_private = ?)", DecompressStateDone, userID, false).Find(&attachments); err != nil { + "= `user`.id").Where("decompress_state= ? and type = ? and (uploader_id= ? or is_private = ?)", DecompressStateDone, TypeCloudBrainOne, userID, false).Find(&attachments); err != nil { return nil, err } return attachments, nil diff --git a/models/file_chunk.go b/models/file_chunk.go index 4eb379b01..e8790e113 100755 --- a/models/file_chunk.go +++ b/models/file_chunk.go @@ -10,6 +10,11 @@ const ( FileUploaded ) +const ( + TypeCloudBrainOne = 0 + TypeCloudBrainTwo = 1 +) + type FileChunk struct { ID int64 `xorm:"pk autoincr"` UUID string `xorm:"uuid UNIQUE"` @@ -19,7 +24,8 @@ type FileChunk struct { TotalChunks int Size int64 UserID int64 `xorm:"INDEX"` - CompletedParts []string `xorm:"DEFAULT """` // chunkNumber+etag eg: ,1-asqwewqe21312312.2-123hjkas + Type int `xorm:"INDEX DEFAULT 0"` + CompletedParts []string `xorm:"DEFAULT ''"` // chunkNumber+etag eg: ,1-asqwewqe21312312.2-123hjkas CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` } @@ -41,14 +47,14 @@ func getFileChunkByMD5(e Engine, md5 string) (*FileChunk, error) { } // GetFileChunkByMD5 returns fileChunk by given id -func GetFileChunkByMD5AndUser(md5 string, userID int64) (*FileChunk, error) { - return getFileChunkByMD5AndUser(x, md5, userID) +func GetFileChunkByMD5AndUser(md5 string, userID int64, typeCloudBrain int) (*FileChunk, error) { + return getFileChunkByMD5AndUser(x, md5, userID, typeCloudBrain) } -func getFileChunkByMD5AndUser(e Engine, md5 string, userID int64) (*FileChunk, error) { +func getFileChunkByMD5AndUser(e Engine, md5 string, userID int64, typeCloudBrain int) (*FileChunk, error) { fileChunk := new(FileChunk) - if has, err := e.Where("md5 = ? and user_id = ?", md5, userID).Get(fileChunk); err != nil { + if has, err := e.Where("md5 = ? and user_id = ? and type = ?", md5, userID, typeCloudBrain).Get(fileChunk); err != nil { return nil, err } else if !has { return nil, ErrFileChunkNotExist{md5, ""} diff --git a/modules/setting/setting.go b/modules/setting/setting.go index c11c70ccc..cf82578f3 100755 --- a/modules/setting/setting.go +++ b/modules/setting/setting.go @@ -447,6 +447,15 @@ var ( //blockchain config BlockChainHost string CommitValidDate string + + //obs config + Endpoint string + AccessKeyID string + SecretAccessKey string + Bucket string + Location string + BasePath string + //RealPath string ) // DateLang transforms standard language locale name to corresponding value in datetime plugin. 
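For reference, a minimal sketch of how the [obs] keys declared above are consumed once loaded; checkObsConfig is a hypothetical helper name, and the real wiring lands in modules/storage/storage.go later in this series (uses code.gitea.io/gitea/modules/obs and code.gitea.io/gitea/modules/setting):

	// Sketch only: build an OBS client from the settings above and check
	// that the configured bucket is reachable.
	func checkObsConfig() error {
		cli, err := obs.New(setting.AccessKeyID, setting.SecretAccessKey, setting.Endpoint)
		if err != nil {
			return err
		}
		// ListObjects is also how storage.ObsHasObject probes the bucket below.
		_, err = cli.ListObjects(&obs.ListObjectsInput{Bucket: setting.Bucket})
		return err
	}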
@@ -1131,6 +1140,14 @@ func NewContext() { sec = Cfg.Section("blockchain") BlockChainHost = sec.Key("HOST").MustString("http://192.168.136.66:3302/") CommitValidDate = sec.Key("COMMIT_VALID_DATE").MustString("2021-01-15") + + sec = Cfg.Section("obs") + Endpoint = sec.Key("ENDPOINT").MustString("112.95.163.82") + AccessKeyID = sec.Key("ACCESS_KEY_ID").MustString("") + SecretAccessKey = sec.Key("SECRET_ACCESS_KEY").MustString("") + Bucket = sec.Key("BUCKET").MustString("testopendata") + Location = sec.Key("LOCATION").MustString("cn-south-222") + BasePath = sec.Key("BASE_PATH").MustString("attachment/") } func loadInternalToken(sec *ini.Section) string { diff --git a/modules/storage/obs.go b/modules/storage/obs.go new file mode 100755 index 000000000..9b1bfd3e8 --- /dev/null +++ b/modules/storage/obs.go @@ -0,0 +1,119 @@ +// Copyright 2020 The Gitea Authors. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package storage + +import ( + "io" + "path" + "strconv" + "strings" + + "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/obs" + "code.gitea.io/gitea/modules/setting" +) + +//check if has the object +func ObsHasObject(path string) (bool, error) { + hasObject := false + output, err := ObsCli.ListObjects(&obs.ListObjectsInput{Bucket:setting.Bucket}) + if err != nil { + log.Error("ListObjects failed:%v", err) + return hasObject, err + } + + for _, obj := range output.Contents { + if path == obj.Key { + hasObject = true + break + } + } + + return hasObject, nil +} + +func GetObsPartInfos(uuid string, uploadID string) (string, error) { + key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/") + + output, err := ObsCli.ListParts(&obs.ListPartsInput{ + Bucket: setting.Bucket, + Key: key, + UploadId: uploadID, + }) + if err != nil { + log.Error("ListParts failed:", err.Error()) + return "", err + } + + var chunks string + for _, partInfo := range output.Parts { + chunks += strconv.Itoa(partInfo.PartNumber) + "-" + partInfo.ETag + "," + } + + return chunks, nil +} + +func NewObsMultiPartUpload(uuid string) (string, error) { + input := &obs.InitiateMultipartUploadInput{} + input.Bucket = setting.Bucket + input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/") + + output, err := ObsCli.InitiateMultipartUpload(input) + if err != nil { + log.Error("InitiateMultipartUpload failed:", err.Error()) + return "", err + } + + return output.UploadId, nil +} + +func CompleteObsMultiPartUpload(uuid string, uploadID string) error { + input := &obs.CompleteMultipartUploadInput{} + input.Bucket = setting.Bucket + input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/") + input.UploadId = uploadID + output, err := ObsCli.ListParts(&obs.ListPartsInput{ + Bucket: setting.Bucket, + Key: input.Key, + UploadId: uploadID, + }) + if err != nil { + log.Error("ListParts failed:", err.Error()) + return err + } + + for _, partInfo := range output.Parts { + input.Parts = append(input.Parts, obs.Part{ + PartNumber: partInfo.PartNumber, + ETag: partInfo.ETag, + }) + } + + _, err = ObsCli.CompleteMultipartUpload(input) + if err != nil { + log.Error("CompleteMultipartUpload failed:", err.Error()) + return err + } + + return nil +} + +func ObsUploadPart(uuid string, uploadId string, partNumber int, partSize int64, partReader io.Reader) error { + input := &obs.UploadPartInput{} + input.PartNumber = 
partNumber + input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/") + input.UploadId = uploadId + input.Bucket = setting.Bucket + input.PartSize = partSize + input.Body = partReader + _, err := ObsCli.UploadPart(input) + if err != nil { + log.Error("UploadPart failed:", err.Error()) + return err + } + + return nil + +} diff --git a/modules/storage/storage.go b/modules/storage/storage.go old mode 100644 new mode 100755 index d06ec7208..abf9e6e32 --- a/modules/storage/storage.go +++ b/modules/storage/storage.go @@ -8,6 +8,8 @@ import ( "fmt" "io" + "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/obs" "code.gitea.io/gitea/modules/setting" ) @@ -40,6 +42,7 @@ func Copy(dstStorage ObjectStorage, dstPath string, srcStorage ObjectStorage, sr var ( // Attachments represents attachments storage Attachments ObjectStorage + ObsCli *obs.ObsClient ) // Init init the stoarge @@ -63,6 +66,12 @@ func Init() error { return fmt.Errorf("Unsupported attachment store type: %s", setting.Attachment.StoreType) } + ObsCli, err = obs.New(setting.AccessKeyID, setting.SecretAccessKey, setting.Endpoint) + if err != nil { + log.Error("obs.New failed:", err) + return err + } + if err != nil { return err } diff --git a/routers/repo/attachment.go b/routers/repo/attachment.go index 0258a5373..422e995f9 100755 --- a/routers/repo/attachment.go +++ b/routers/repo/attachment.go @@ -15,6 +15,7 @@ import ( "code.gitea.io/gitea/modules/worker" contexExt "context" "encoding/json" + "errors" "fmt" "net/http" "strconv" @@ -340,9 +341,16 @@ func UpdateAttachmentDecompressState(ctx *context.Context) { func GetSuccessChunks(ctx *context.Context) { fileMD5 := ctx.Query("md5") + typeCloudBrain := ctx.QueryInt("type") var chunks string - fileChunk, err := models.GetFileChunkByMD5AndUser(fileMD5, ctx.User.ID) + err := checkTypeCloudBrain(typeCloudBrain) + if err != nil { + ctx.ServerError("checkTypeCloudBrain failed", err) + return + } + + fileChunk, err := models.GetFileChunkByMD5AndUser(fileMD5, ctx.User.ID, typeCloudBrain) if err != nil { if models.IsErrFileChunkNotExist(err) { ctx.JSON(200, map[string]string{ @@ -357,12 +365,22 @@ func GetSuccessChunks(ctx *context.Context) { return } - isExist, err := storage.Attachments.HasObject(models.AttachmentRelativePath(fileChunk.UUID)) - if err != nil { - ctx.ServerError("HasObject failed", err) - return + isExist := false + if typeCloudBrain == models.TypeCloudBrainOne { + isExist, err = storage.Attachments.HasObject(models.AttachmentRelativePath(fileChunk.UUID)) + if err != nil { + ctx.ServerError("HasObject failed", err) + return + } + } else { + isExist, err = storage.ObsHasObject(models.AttachmentRelativePath(fileChunk.UUID)) + if err != nil { + ctx.ServerError("ObsHasObject failed", err) + return + } } + if isExist { if fileChunk.IsUploaded == models.FileNotUploaded { log.Info("the file has been uploaded but not recorded") @@ -380,10 +398,18 @@ func GetSuccessChunks(ctx *context.Context) { } } - chunks, err = storage.GetPartInfos(fileChunk.UUID, fileChunk.UploadID) - if err != nil { - ctx.ServerError("GetPartInfos failed", err) - return + if typeCloudBrain == models.TypeCloudBrainOne { + chunks, err = storage.GetPartInfos(fileChunk.UUID, fileChunk.UploadID) + if err != nil { + ctx.ServerError("GetPartInfos failed", err) + return + } + } else { + chunks, err = storage.GetObsPartInfos(fileChunk.UUID, fileChunk.UploadID) + if err != nil { + ctx.ServerError("GetObsPartInfos failed", err) + return + } } } @@ -445,6 +471,13 @@ 
func NewMultipart(ctx *context.Context) { return } + typeCloudBrain := ctx.QueryInt("type") + err = checkTypeCloudBrain(typeCloudBrain) + if err != nil { + ctx.ServerError("checkTypeCloudBrain failed", err) + return + } + if setting.Attachment.StoreType == storage.MinioStorageType { totalChunkCounts := ctx.QueryInt("totalChunkCounts") if totalChunkCounts > minio_ext.MaxPartsCount { @@ -459,10 +492,19 @@ func NewMultipart(ctx *context.Context) { } uuid := gouuid.NewV4().String() - uploadID, err := storage.NewMultiPartUpload(uuid) - if err != nil { - ctx.ServerError("NewMultipart", err) - return + var uploadID string + if typeCloudBrain == models.TypeCloudBrainOne { + uploadID, err = storage.NewMultiPartUpload(uuid) + if err != nil { + ctx.ServerError("NewMultipart", err) + return + } + } else { + uploadID, err = storage.NewObsMultiPartUpload(uuid) + if err != nil { + ctx.ServerError("NewObsMultiPartUpload", err) + return + } } _, err = models.InsertFileChunk(&models.FileChunk{ @@ -472,6 +514,7 @@ func NewMultipart(ctx *context.Context) { Md5: ctx.Query("md5"), Size: fileSize, TotalChunks: totalChunkCounts, + Type: typeCloudBrain, }) if err != nil { @@ -511,9 +554,40 @@ func GetMultipartUploadUrl(ctx *context.Context) { }) } +func UploadPart(ctx *context.Context) { + uuid := ctx.Query("uuid") + uploadID := ctx.Query("uploadID") + partNumber := ctx.QueryInt("chunkNumber") + size := ctx.QueryInt64("size") + + if size > minio_ext.MinPartSize { + ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size)) + return + } + + url, err := storage.GenMultiPartSignedUrl(uuid, uploadID, partNumber, size) + //todo:get file reader + //err := storage.ObsUploadPart(uuid, uploadID, partNumber, size, partReader) + if err != nil { + ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err)) + return + } + + ctx.JSON(200, map[string]string{ + "url": url, + }) +} + func CompleteMultipart(ctx *context.Context) { uuid := ctx.Query("uuid") uploadID := ctx.Query("uploadID") + typeCloudBrain := ctx.QueryInt("type") + + err := checkTypeCloudBrain(typeCloudBrain) + if err != nil { + ctx.ServerError("checkTypeCloudBrain failed", err) + return + } fileChunk, err := models.GetFileChunkByUUID(uuid) if err != nil { @@ -525,10 +599,18 @@ func CompleteMultipart(ctx *context.Context) { return } - _, err = storage.CompleteMultiPartUpload(uuid, uploadID) - if err != nil { - ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err)) - return + if typeCloudBrain == models.TypeCloudBrainOne { + _, err = storage.CompleteMultiPartUpload(uuid, uploadID) + if err != nil { + ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err)) + return + } + } else { + err = storage.CompleteObsMultiPartUpload(uuid, uploadID) + if err != nil { + ctx.Error(500, fmt.Sprintf("CompleteObsMultiPartUpload failed: %v", err)) + return + } } fileChunk.IsUploaded = models.FileUploaded @@ -546,6 +628,7 @@ func CompleteMultipart(ctx *context.Context) { Name: ctx.Query("file_name"), Size: ctx.QueryInt64("size"), DatasetID: ctx.QueryInt64("dataset_id"), + Type: typeCloudBrain, }) if err != nil { @@ -704,3 +787,11 @@ func queryDatasets(ctx *context.Context, attachs []*models.AttachmentUsername) { }) return } + +func checkTypeCloudBrain(typeCloudBrain int) error { + if typeCloudBrain != models.TypeCloudBrainOne && typeCloudBrain != models.TypeCloudBrainTwo { + log.Error("type error:", typeCloudBrain) + return errors.New("type error") + } + return nil +} diff --git a/routers/routes/routes.go b/routers/routes/routes.go index 
8831e20a5..7e29ebfa5 100755 --- a/routers/routes/routes.go +++ b/routers/routes/routes.go @@ -529,6 +529,7 @@ func RegisterRoutes(m *macaron.Macaron) { m.Get("/get_multipart_url", repo.GetMultipartUploadUrl) m.Post("/complete_multipart", repo.CompleteMultipart) m.Post("/update_chunk", repo.UpdateMultipart) + m.Post("/upload_part", repo.UploadPart) }, reqSignIn) m.Group("/attachments", func() { diff --git a/web_src/js/components/MinioUploader.vue b/web_src/js/components/MinioUploader.vue index 71b44a39c..76c493323 100755 --- a/web_src/js/components/MinioUploader.vue +++ b/web_src/js/components/MinioUploader.vue @@ -21,6 +21,7 @@ import qs from 'qs'; import createDropzone from '../features/dropzone.js'; const {_AppSubUrl, _StaticUrlPrefix, csrf} = window.config; +const cloud_brain_type = 0; export default { data() { @@ -255,6 +256,7 @@ export default { const params = { params: { md5: file.uniqueIdentifier, + type: cloud_brain_type, _csrf: csrf } }; @@ -283,6 +285,7 @@ export default { md5: file.uniqueIdentifier, size: file.size, fileType: file.type, + type: cloud_brain_type, _csrf: csrf } }); @@ -381,6 +384,7 @@ export default { file_name: file.name, size: file.size, dataset_id: file.datasetId, + type: cloud_brain_type, _csrf: csrf }) ); From 6d90893739db652e8af496abb4517a4d3a8b702a Mon Sep 17 00:00:00 2001 From: yuyuanshifu <747342561@qq.com> Date: Sat, 30 Jan 2021 09:18:29 +0800 Subject: [PATCH 03/36] add config --- custom/conf/app.ini.sample | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/custom/conf/app.ini.sample b/custom/conf/app.ini.sample index 13a3d4aeb..9ad232437 100755 --- a/custom/conf/app.ini.sample +++ b/custom/conf/app.ini.sample @@ -1069,3 +1069,14 @@ PASSWORD = 4BPmgvK2hb2Eywwyp4YZRY4B7yQf4DAC [blockchain] HOST = http://192.168.207.84:3002/ COMMIT_VALID_DATE = 2021-01-15 + +[obs] +ENDPOINT = 112.95.163.82 +ACCESS_KEY_ID = FDP3LRMHLB9S77VWEHE3 +SECRET_ACCESS_KEY = LyM82Wk80pgjhs2z7AdDcsdpCWhbsJtSzQ7hkESN +BUCKET = testopendata +LOCATION = cn-south-222 +BASE_PATH = attachment/ + +[modelarts] +ENDPOINT = 112.95.163.80 From cc1a0c5250f50f9be1f17786c6e0eb461bc9d870 Mon Sep 17 00:00:00 2001 From: yuyuanshifu <747342561@qq.com> Date: Sat, 30 Jan 2021 19:00:35 +0800 Subject: [PATCH 04/36] upload obs --- models/file_chunk.go | 2 +- modules/obs/temporary.go | 52 +++++++++++++++++++++ modules/storage/obs.go | 44 +++++++++++++++-- routers/repo/attachment.go | 83 +++++++++++++++++++++++---------- web_src/js/components/MinioUploader.vue | 56 +++++++++++++++------- 5 files changed, 189 insertions(+), 48 deletions(-) diff --git a/models/file_chunk.go b/models/file_chunk.go index e8790e113..3b79adc7c 100755 --- a/models/file_chunk.go +++ b/models/file_chunk.go @@ -95,6 +95,6 @@ func UpdateFileChunk(fileChunk *FileChunk) error { func updateFileChunk(e Engine, fileChunk *FileChunk) error { var sess *xorm.Session sess = e.Where("uuid = ?", fileChunk.UUID) - _, err := sess.Cols("is_uploaded", "completed_parts").Update(fileChunk) + _, err := sess.Cols("is_uploaded").Update(fileChunk) return err } diff --git a/modules/obs/temporary.go b/modules/obs/temporary.go index bfaeb8197..7a2ad9b64 100755 --- a/modules/obs/temporary.go +++ b/modules/obs/temporary.go @@ -16,6 +16,7 @@ package obs import ( "errors" "fmt" + "github.com/unknwon/com" "io" "net/http" "os" @@ -788,3 +789,54 @@ func (obsClient ObsClient) GetBucketRequestPaymentWithSignedUrl(signedUrl string } return } + + +func (obsClient ObsClient) CreateUploadPartSignedUrl(bucketName, objectKey, uploadId string, partNumber int, 
partSize int64) (string, error) {
+ requestURL := ""
+
+ input := &UploadPartInput{}
+ input.Bucket = bucketName
+ input.Key = objectKey
+ input.PartNumber = partNumber
+ input.UploadId = uploadId
+ //input.ContentMD5 = _input.ContentMD5
+ //input.SourceFile = _input.SourceFile
+ //input.Offset = _input.Offset
+ input.PartSize = partSize
+ //input.SseHeader = _input.SseHeader
+ //input.Body = _input.Body
+
+ params, headers, _, err := input.trans(obsClient.conf.signature == SignatureObs)
+ if err != nil {
+ return requestURL, err
+ }
+
+ if params == nil {
+ params = make(map[string]string)
+ }
+
+ if headers == nil {
+ headers = make(map[string][]string)
+ }
+
+ var extensions []extensionOptions
+ for _, extension := range extensions {
+ if extensionHeader, ok := extension.(extensionHeaders); ok {
+ _err := extensionHeader(headers, obsClient.conf.signature == SignatureObs)
+ if _err != nil {
+ doLog(LEVEL_WARN, fmt.Sprintf("set header with error: %v", _err))
+ }
+ } else {
+ doLog(LEVEL_WARN, "Unsupported extensionOptions")
+ }
+ }
+
+ headers["Content-Length"] = []string{com.ToStr(partSize,10)}
+
+ requestURL, err = obsClient.doAuth(HTTP_PUT, bucketName, objectKey, params, headers, "")
+ if err != nil {
+ return requestURL, err
+ }
+
+ return requestURL, nil
+}
diff --git a/modules/storage/obs.go b/modules/storage/obs.go
index 9b1bfd3e8..0695dc62c 100755
--- a/modules/storage/obs.go
+++ b/modules/storage/obs.go
@@ -100,20 +100,54 @@ func CompleteObsMultiPartUpload(uuid string, uploadID string) error {
 return nil
 }
 
-func ObsUploadPart(uuid string, uploadId string, partNumber int, partSize int64, partReader io.Reader) error {
+func ObsUploadPart(uuid string, uploadId string, partNumber int, partSize int64, body io.Reader) (string, error) {
 input := &obs.UploadPartInput{}
 input.PartNumber = partNumber
 input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/")
 input.UploadId = uploadId
 input.Bucket = setting.Bucket
 input.PartSize = partSize
- input.Body = partReader
- _, err := ObsCli.UploadPart(input)
+ input.Body = body
+ output, err := ObsCli.UploadPart(input)
 if err != nil {
 log.Error("UploadPart failed:", err.Error())
- return err
+ return "", err
 }
 
- return nil
+ return output.ETag, nil
+}
+
+func ObsGenMultiPartSignedUrl(uuid string, uploadId string, partNumber int, partSize int64) (string, error) {
+ /*
+ input := &obs.CreateSignedUrlInput{}
+ input.Bucket = setting.Bucket
+ input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/")
+ input.Expires = int(PresignedUploadPartUrlExpireTime)
+ input.Method = obs.HTTP_PUT
+
+ input.QueryParams = map[string]string{
+ "Bucket": input.Bucket,
+ "Key": input.Key,
+ "PartNumber": com.ToStr(partNumber,10),
+ "UploadId": uploadId,
+ "PartSize": com.ToStr(partSize,10),
+ }
+
+ input.Headers = map[string]string{
+
+ }
+
+ */
+
+ Key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/")
+ url, err := ObsCli.CreateUploadPartSignedUrl(setting.Bucket, Key, uploadId, partNumber, partSize)
+ if err != nil {
+ log.Error("CreateSignedUrl failed:", err.Error())
+ return "", err
+ }
+
+ log.Info(url)
+
+ return url, nil
}
diff --git a/routers/repo/attachment.go b/routers/repo/attachment.go
index 422e995f9..9dcecb8c3 100755
--- a/routers/repo/attachment.go
+++ b/routers/repo/attachment.go
@@ -5,6 +5,15 @@
 package repo
 
 import (
+ contexExt "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "mime/multipart"
+ "net/http"
+ "strconv"
+ "strings" + "code.gitea.io/gitea/models" "code.gitea.io/gitea/modules/context" "code.gitea.io/gitea/modules/log" @@ -13,13 +22,6 @@ import ( "code.gitea.io/gitea/modules/storage" "code.gitea.io/gitea/modules/upload" "code.gitea.io/gitea/modules/worker" - contexExt "context" - "encoding/json" - "errors" - "fmt" - "net/http" - "strconv" - "strings" gouuid "github.com/satori/go.uuid" ) @@ -38,6 +40,15 @@ type CloudBrainDataset struct { CreateTime string `json:"created_at"` } +type UploadForm struct { + UploadID string `form:"uploadId"` + UuID string `form:"uuid"` + PartSize int64 `form:"size"` + Offset int64 `form:"offset"` + PartNumber int `form:"chunkNumber"` + PartFile multipart.File `form:"file"` +} + func RenderAttachmentSettings(ctx *context.Context) { renderAttachmentSettings(ctx) } @@ -538,15 +549,31 @@ func GetMultipartUploadUrl(ctx *context.Context) { partNumber := ctx.QueryInt("chunkNumber") size := ctx.QueryInt64("size") - if size > minio_ext.MinPartSize { - ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size)) + typeCloudBrain := ctx.QueryInt("type") + err := checkTypeCloudBrain(typeCloudBrain) + if err != nil { + ctx.ServerError("checkTypeCloudBrain failed", err) return } - url, err := storage.GenMultiPartSignedUrl(uuid, uploadID, partNumber, size) - if err != nil { - ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err)) - return + url := "" + if typeCloudBrain == models.TypeCloudBrainOne { + if size > minio_ext.MinPartSize { + ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size)) + return + } + + url, err = storage.GenMultiPartSignedUrl(uuid, uploadID, partNumber, size) + if err != nil { + ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err)) + return + } + } else { + url, err = storage.ObsGenMultiPartSignedUrl(uuid, uploadID, partNumber, size) + if err != nil { + ctx.Error(500, fmt.Sprintf("ObsGenMultiPartSignedUrl failed: %v", err)) + return + } } ctx.JSON(200, map[string]string{ @@ -555,26 +582,34 @@ func GetMultipartUploadUrl(ctx *context.Context) { } func UploadPart(ctx *context.Context) { - uuid := ctx.Query("uuid") - uploadID := ctx.Query("uploadID") - partNumber := ctx.QueryInt("chunkNumber") - size := ctx.QueryInt64("size") + tmp, err := ctx.Req.Body().String() + log.Info(tmp) - if size > minio_ext.MinPartSize { - ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size)) + err = ctx.Req.ParseMultipartForm(100*1024*1024) + if err != nil { + ctx.Error(http.StatusBadRequest, fmt.Sprintf("ParseMultipartForm failed: %v", err)) return } - url, err := storage.GenMultiPartSignedUrl(uuid, uploadID, partNumber, size) - //todo:get file reader - //err := storage.ObsUploadPart(uuid, uploadID, partNumber, size, partReader) + file, fileHeader, err := ctx.Req.FormFile("file") + log.Info(ctx.Req.Form.Get("file")) if err != nil { - ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err)) + ctx.Error(http.StatusBadRequest, fmt.Sprintf("FormFile failed: %v", err)) + return + } + + + + log.Info(fileHeader.Filename) + + etag, err := storage.ObsUploadPart("", "", 1, 1, file) + if err != nil { + ctx.Error(500, fmt.Sprintf("ObsUploadPart failed: %v", err)) return } ctx.JSON(200, map[string]string{ - "url": url, + "etag": etag, }) } diff --git a/web_src/js/components/MinioUploader.vue b/web_src/js/components/MinioUploader.vue index 76c493323..85e47506d 100755 --- a/web_src/js/components/MinioUploader.vue +++ b/web_src/js/components/MinioUploader.vue @@ -21,7 +21,7 @@ import qs from 'qs'; import createDropzone from 
'../features/dropzone.js'; const {_AppSubUrl, _StaticUrlPrefix, csrf} = window.config; -const cloud_brain_type = 0; +const cloud_brain_type = 1; export default { data() { @@ -129,9 +129,9 @@ export default { finishUpload(file) { this.emitDropzoneSuccess(file); - setTimeout(() => { - window.location.reload(); - }, 1000); + // setTimeout(() => { + // window.location.reload(); + // }, 1000); }, computeMD5(file) { @@ -326,6 +326,7 @@ export default { uploadID: file.uploadID, size: partSize, chunkNumber: currentChunk + 1, + type: cloud_brain_type, _csrf: csrf } }); @@ -348,30 +349,49 @@ export default { }) ); } + + async function uploadPart(currentChunk, partSize, e) { + console.log(e); + let params = new FormData(); + params.append("uuid", file.uuid); + params.append("uploadId", file.uploadID); + params.append("size", partSize); + params.append("chunkNumber", currentChunk + 1); + params.append("file", e.target.file); + params.append("_csrf", csrf); + return await axios.post('/attachments/upload_part', + params, + {headers: {'Content-Type': 'multipart/form-data'}} + ); + } + async function uploadChunk(e) { try { if (!checkSuccessChunks()) { const start = currentChunk * chunkSize; const partSize = start + chunkSize >= file.size ? file.size - start : chunkSize; + await uploadPart(currentChunk, partSize, e); + // 获取分片上传url - await getUploadChunkUrl(currentChunk, partSize); - if (urls[currentChunk] != '') { - // 上传到minio - await uploadMinio(urls[currentChunk], e); - if (etags[currentChunk] != '') { - // 更新数据库:分片上传结果 - //await updateChunk(currentChunk); - } else { - console.log("上传到minio uploadChunk etags[currentChunk] == ''");// TODO - } - } else { - console.log("uploadChunk urls[currentChunk] != ''");// TODO - } + // await getUploadChunkUrl(currentChunk, partSize); + // if (urls[currentChunk] != '') { + // // 上传到minio + // await uploadMinio(urls[currentChunk], e); + // if (etags[currentChunk] != '') { + // // 更新数据库:分片上传结果 + // //await updateChunk(currentChunk); + // } else { + // console.log("上传到minio uploadChunk etags[currentChunk] == ''");// TODO + // } + // } else { + // console.log("uploadChunk urls[currentChunk] != ''");// TODO + // } + } } catch (error) { - this.emitDropzoneFailed(file); console.log(error); + this.emitDropzoneFailed(file); } } From a817daef4035ae7aff5f4ed949dc22d75526208f Mon Sep 17 00:00:00 2001 From: yuyuanshifu <747342561@qq.com> Date: Sat, 30 Jan 2021 21:09:05 +0800 Subject: [PATCH 05/36] js upload --- modules/obs/temporary.go | 65 +++++++++++++++++++++++++++++++++++++++++----- modules/storage/obs.go | 7 ++--- routers/repo/attachment.go | 51 +++++++++++++++++++++++++++++------- routers/routes/routes.go | 1 + 4 files changed, 105 insertions(+), 19 deletions(-) diff --git a/modules/obs/temporary.go b/modules/obs/temporary.go index 7a2ad9b64..dfb87ffc6 100755 --- a/modules/obs/temporary.go +++ b/modules/obs/temporary.go @@ -791,8 +791,8 @@ func (obsClient ObsClient) GetBucketRequestPaymentWithSignedUrl(signedUrl string } -func (obsClient ObsClient) CreateUploadPartSignedUrl(bucketName, objectKey, uploadId string, partNumber int, partSize int64) (string, error) { - requestURL := "" +func (obsClient ObsClient) CreateUploadPartSignedUrl(bucketName, objectKey, uploadId string, partNumber int, partSize int64) (*http.Request, error) { + var req *http.Request input := &UploadPartInput{} input.Bucket = bucketName @@ -808,7 +808,7 @@ func (obsClient ObsClient) CreateUploadPartSignedUrl(bucketName, objectKey, uplo params, headers, _, err := input.trans(obsClient.conf.signature == 
SignatureObs) if err != nil { - return requestURL, err + return req, err } if params == nil { @@ -833,10 +833,63 @@ func (obsClient ObsClient) CreateUploadPartSignedUrl(bucketName, objectKey, uplo headers["Content-Length"] = []string{com.ToStr(partNumber,10)} - requestURL, err = obsClient.doAuth(HTTP_PUT, bucketName, objectKey, params, headers, "") + requestURL, err := obsClient.doAuth(HTTP_PUT, bucketName, objectKey, params, headers, "") if err != nil { - return requestURL, nil + return req, nil } - return requestURL, nil + var _data io.Reader + req, err = http.NewRequest(HTTP_PUT, requestURL, _data) + if obsClient.conf.ctx != nil { + req = req.WithContext(obsClient.conf.ctx) + } + if err != nil { + return req, err + } + + if isDebugLogEnabled() { + auth := headers[HEADER_AUTH_CAMEL] + delete(headers, HEADER_AUTH_CAMEL) + + var isSecurityToken bool + var securityToken []string + if securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_AMZ]; isSecurityToken { + headers[HEADER_STS_TOKEN_AMZ] = []string{"******"} + } else if securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_OBS]; isSecurityToken { + headers[HEADER_STS_TOKEN_OBS] = []string{"******"} + } + doLog(LEVEL_DEBUG, "Request headers: %v", headers) + headers[HEADER_AUTH_CAMEL] = auth + if isSecurityToken { + if obsClient.conf.signature == SignatureObs { + headers[HEADER_STS_TOKEN_OBS] = securityToken + } else { + headers[HEADER_STS_TOKEN_AMZ] = securityToken + } + } + } + + for key, value := range headers { + if key == HEADER_HOST_CAMEL { + req.Host = value[0] + delete(headers, key) + } else if key == HEADER_CONTENT_LENGTH_CAMEL { + req.ContentLength = StringToInt64(value[0], -1) + delete(headers, key) + } else { + req.Header[key] = value + } + } + + var lastRequest *http.Request + lastRequest = req + + req.Header[HEADER_USER_AGENT_CAMEL] = []string{USER_AGENT} + + if lastRequest != nil { + req.Host = lastRequest.Host + req.ContentLength = lastRequest.ContentLength + } + + return req, nil } diff --git a/modules/storage/obs.go b/modules/storage/obs.go index 0695dc62c..c80b9612f 100755 --- a/modules/storage/obs.go +++ b/modules/storage/obs.go @@ -140,14 +140,15 @@ func ObsGenMultiPartSignedUrl(uuid string, uploadId string, partNumber int, part */ Key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/") - url, err := ObsCli.CreateUploadPartSignedUrl(setting.Bucket, Key, uploadId, partNumber, partSize) + req, err := ObsCli.CreateUploadPartSignedUrl(setting.Bucket, Key, uploadId, partNumber, partSize) if err != nil { log.Error("CreateSignedUrl failed:", err.Error()) return "", err } - log.Info(url) + log.Info(req.URL.String()) + log.Info("", req.Header) - return url, nil + return req.URL.String(), nil } diff --git a/routers/repo/attachment.go b/routers/repo/attachment.go index 9dcecb8c3..2be78a2e0 100755 --- a/routers/repo/attachment.go +++ b/routers/repo/attachment.go @@ -11,6 +11,7 @@ import ( "fmt" "mime/multipart" "net/http" + "path" "strconv" "strings" @@ -275,13 +276,29 @@ func GetPresignedPutObjectURL(ctx *context.Context) { // AddAttachment response for add attachment record func AddAttachment(ctx *context.Context) { - uuid := ctx.Query("uuid") - has, err := storage.Attachments.HasObject(models.AttachmentRelativePath(uuid)) + typeCloudBrain := ctx.QueryInt("type") + err := checkTypeCloudBrain(typeCloudBrain) if err != nil { - ctx.ServerError("HasObject", err) + ctx.ServerError("checkTypeCloudBrain failed", err) return } + uuid := ctx.Query("uuid") + has := false + if 
typeCloudBrain == models.TypeCloudBrainOne { + has, err = storage.Attachments.HasObject(models.AttachmentRelativePath(uuid)) + if err != nil { + ctx.ServerError("HasObject", err) + return + } + } else { + has, err = storage.ObsHasObject(models.AttachmentRelativePath(uuid)) + if err != nil { + ctx.ServerError("ObsHasObject", err) + return + } + } + if !has { ctx.Error(404, "attachment has not been uploaded") return @@ -294,6 +311,7 @@ func AddAttachment(ctx *context.Context) { Name: ctx.Query("file_name"), Size: ctx.QueryInt64("size"), DatasetID: ctx.QueryInt64("dataset_id"), + Type: typeCloudBrain, }) if err != nil { @@ -303,16 +321,19 @@ func AddAttachment(ctx *context.Context) { if attachment.DatasetID != 0 { if strings.HasSuffix(attachment.Name, ".zip") { - err = worker.SendDecompressTask(contexExt.Background(), uuid) - if err != nil { - log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error()) - } else { - attachment.DecompressState = models.DecompressStateIng - err = models.UpdateAttachment(attachment) + if typeCloudBrain == models.TypeCloudBrainOne { + err = worker.SendDecompressTask(contexExt.Background(), uuid) if err != nil { - log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error()) + log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error()) + } else { + attachment.DecompressState = models.DecompressStateIng + err = models.UpdateAttachment(attachment) + if err != nil { + log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error()) + } } } + //todo:decompress type_two } } @@ -581,6 +602,16 @@ func GetMultipartUploadUrl(ctx *context.Context) { }) } +func GetObsKey(ctx *context.Context) { + uuid := gouuid.NewV4().String() + key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/") + + ctx.JSON(200, map[string]string{ + "uuid": uuid, + "key": key, + }) +} + func UploadPart(ctx *context.Context) { tmp, err := ctx.Req.Body().String() log.Info(tmp) diff --git a/routers/routes/routes.go b/routers/routes/routes.go index 7e29ebfa5..a49dd4b65 100755 --- a/routers/routes/routes.go +++ b/routers/routes/routes.go @@ -530,6 +530,7 @@ func RegisterRoutes(m *macaron.Macaron) { m.Post("/complete_multipart", repo.CompleteMultipart) m.Post("/update_chunk", repo.UpdateMultipart) m.Post("/upload_part", repo.UploadPart) + m.Get("/get_obs_key", repo.GetObsKey) }, reqSignIn) m.Group("/attachments", func() { From 4c2b9a40b3da779c24101db7f7b009f43d968470 Mon Sep 17 00:00:00 2001 From: Gitea Date: Sat, 30 Jan 2021 21:11:41 +0800 Subject: [PATCH 06/36] test --- options/locale/locale_zh-CN.ini | 3 +- package-lock.json | 92 ++++- package.json | 1 + templates/repo/cloudbrain/index.tmpl | 2 +- templates/repo/datasets/index.tmpl | 3 + templates/repo/header.tmpl | 71 +++- web_src/js/components/MinioUploader.vue | 639 ++++++++++++++++---------------- 7 files changed, 483 insertions(+), 328 deletions(-) diff --git a/options/locale/locale_zh-CN.ini b/options/locale/locale_zh-CN.ini index 8df65bd2a..3e49cb21a 100755 --- a/options/locale/locale_zh-CN.ini +++ b/options/locale/locale_zh-CN.ini @@ -756,7 +756,8 @@ cloudbrain.commit_image=提交 balance=余额 balance.total_view=余额总览 balance.available=可用余额: -balance.disable=不可用余额: +cloudbrain1=云脑1 +cloudbrain2=云脑2 template.items=模板选项 template.git_content=Git数据(默认分支) diff --git a/package-lock.json b/package-lock.json index 42d6926af..00eeeea26 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1922,9 +1922,9 @@ "axios": { "version": "0.21.1", "resolved": 
"https://registry.npmjs.org/axios/-/axios-0.21.1.tgz", - "integrity": "sha512-fjgm5MvRHLhx+osE2xoekY70AhARk3a6hkN+3Io1jc00jtquGvxYlKlsFUhmUET0V5te6CcZI7lcv2Ym61mjHA==", + "integrity": "sha512-dKQiRHxGD9PPRIUNIWvZhPTPpl1rf/OxTYKsqKUDjBwYylTvV7SjSHJb9ratfyzM6wCdLCOYLzs73qpg5c4iGA==", "requires": { - "follow-redirects": "1.5.10" + "follow-redirects": "^1.10.0" } }, "babel-loader": { @@ -3409,6 +3409,11 @@ "assert-plus": "^1.0.0" } }, + "date-format": { + "version": "3.0.0", + "resolved": "https://registry.npm.taobao.org/date-format/download/date-format-3.0.0.tgz", + "integrity": "sha1-64eANlx9KxURB4+0keZHl4DzrZU=" + }, "dateformat": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/dateformat/-/dateformat-2.2.0.tgz", @@ -4020,6 +4025,15 @@ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=" }, + "esdk-obs-nodejs": { + "version": "3.20.11", + "resolved": "https://registry.npm.taobao.org/esdk-obs-nodejs/download/esdk-obs-nodejs-3.20.11.tgz?cache=0&sync_timestamp=1610351636380&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fesdk-obs-nodejs%2Fdownload%2Fesdk-obs-nodejs-3.20.11.tgz", + "integrity": "sha1-/bMuzu3qoT+xLgmCcgg8yM6MIsE=", + "requires": { + "log4js": "^6.3.0", + "xml2js": "^0.4.23" + } + }, "eslint": { "version": "6.8.0", "resolved": "https://registry.npm.taobao.org/eslint/download/eslint-6.8.0.tgz", @@ -5195,8 +5209,7 @@ "flatted": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/flatted/-/flatted-2.0.2.tgz", - "integrity": "sha512-r5wGx7YeOwNWNlCA0wQ86zKyDLMQr+/RB8xy74M4hTphfmjlijTSSXGuH8rnvKZnfT9i+75zmd8jcKdMR4O6jA==", - "dev": true + "integrity": "sha512-r5wGx7YeOwNWNlCA0wQ86zKyDLMQr+/RB8xy74M4hTphfmjlijTSSXGuH8rnvKZnfT9i+75zmd8jcKdMR4O6jA==" }, "flatten": { "version": "1.0.3", @@ -5367,6 +5380,16 @@ "readable-stream": "^2.0.0" } }, + "fs-extra": { + "version": "8.1.0", + "resolved": "https://registry.npm.taobao.org/fs-extra/download/fs-extra-8.1.0.tgz?cache=0&sync_timestamp=1611075469998&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Ffs-extra%2Fdownload%2Ffs-extra-8.1.0.tgz", + "integrity": "sha1-SdQ8RaiM2Wd2aMt74bRu/bjS4cA=", + "requires": { + "graceful-fs": "^4.2.0", + "jsonfile": "^4.0.0", + "universalify": "^0.1.0" + } + }, "fs-minipass": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", @@ -7700,6 +7723,14 @@ "minimist": "^1.2.5" } }, + "jsonfile": { + "version": "4.0.0", + "resolved": "https://registry.npm.taobao.org/jsonfile/download/jsonfile-4.0.0.tgz?cache=0&sync_timestamp=1604161797011&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fjsonfile%2Fdownload%2Fjsonfile-4.0.0.tgz", + "integrity": "sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss=", + "requires": { + "graceful-fs": "^4.1.6" + } + }, "jsprim": { "version": "1.4.1", "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz", @@ -8184,6 +8215,18 @@ "chalk": "^2.4.2" } }, + "log4js": { + "version": "6.3.0", + "resolved": "https://registry.npm.taobao.org/log4js/download/log4js-6.3.0.tgz", + "integrity": "sha1-EN+vu0NDUaPjAnegC5h5RG9xW8s=", + "requires": { + "date-format": "^3.0.0", + "debug": "^4.1.1", + "flatted": "^2.0.1", + "rfdc": "^1.1.4", + "streamroller": "^2.2.4" + } + }, "longest": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/longest/-/longest-1.0.1.tgz", @@ -11821,6 +11864,11 @@ "resolved": "https://registry.npmjs.org/rework-visit/-/rework-visit-1.0.0.tgz", "integrity": "sha1-mUWygD8hni96ygCtuLyfZA+ELJo=" }, + "rfdc": { + 
"version": "1.2.0", + "resolved": "https://registry.npm.taobao.org/rfdc/download/rfdc-1.2.0.tgz?cache=0&sync_timestamp=1610744108114&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Frfdc%2Fdownload%2Frfdc-1.2.0.tgz", + "integrity": "sha1-npiUJY9I8oS0PDFDxoBwpPNzuUk=" + }, "rgb-regex": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/rgb-regex/-/rgb-regex-1.0.1.tgz", @@ -12428,6 +12476,23 @@ "resolved": "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.1.tgz", "integrity": "sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ==" }, + "streamroller": { + "version": "2.2.4", + "resolved": "https://registry.npm.taobao.org/streamroller/download/streamroller-2.2.4.tgz", + "integrity": "sha1-wZjO1C25QIamGTYIGHzoCl8rDlM=", + "requires": { + "date-format": "^2.1.0", + "debug": "^4.1.1", + "fs-extra": "^8.1.0" + }, + "dependencies": { + "date-format": { + "version": "2.1.0", + "resolved": "https://registry.npm.taobao.org/date-format/download/date-format-2.1.0.tgz", + "integrity": "sha1-MdW16iEc9f12TNOLr50DPffhJc8=" + } + } + }, "strict-uri-encode": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz", @@ -13983,6 +14048,11 @@ "os-name": "^3.1.0" } }, + "universalify": { + "version": "0.1.2", + "resolved": "https://registry.npm.taobao.org/universalify/download/universalify-0.1.2.tgz?cache=0&sync_timestamp=1603179967633&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Funiversalify%2Fdownload%2Funiversalify-0.1.2.tgz", + "integrity": "sha1-tkb2m+OULavOzJ1mOcgNwQXvqmY=" + }, "unquote": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/unquote/-/unquote-1.1.1.tgz", @@ -15035,6 +15105,20 @@ "repeat-string": "^1.5.2" } }, + "xml2js": { + "version": "0.4.23", + "resolved": "https://registry.npm.taobao.org/xml2js/download/xml2js-0.4.23.tgz?cache=0&sync_timestamp=1599054229598&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fxml2js%2Fdownload%2Fxml2js-0.4.23.tgz", + "integrity": "sha1-oMaVFnUkIesqx1juTUzPWIQ+rGY=", + "requires": { + "sax": ">=0.6.0", + "xmlbuilder": "~11.0.0" + } + }, + "xmlbuilder": { + "version": "11.0.1", + "resolved": "https://registry.npm.taobao.org/xmlbuilder/download/xmlbuilder-11.0.1.tgz", + "integrity": "sha1-vpuuHIoEbnazESdyY0fQrXACvrM=" + }, "xtend": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", diff --git a/package.json b/package.json index 77e28bd06..26654ef8d 100644 --- a/package.json +++ b/package.json @@ -19,6 +19,7 @@ "cssnano": "4.1.10", "domino": "2.1.5", "dropzone": "5.7.2", + "esdk-obs-nodejs": "3.20.11", "fast-glob": "3.2.2", "file-loader": "6.0.0", "fomantic-ui": "2.8.4", diff --git a/templates/repo/cloudbrain/index.tmpl b/templates/repo/cloudbrain/index.tmpl index 778c70e50..e2197e3b8 100755 --- a/templates/repo/cloudbrain/index.tmpl +++ b/templates/repo/cloudbrain/index.tmpl @@ -198,7 +198,7 @@
-

{{.i18n.Tr "repo.cloudbrain"}}

+

{{.i18n.Tr "repo.cloudbrain1"}}

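Note: the attachment handlers in this series branch on a new `type` query parameter through `checkTypeCloudBrain`, but its body never appears in these hunks. Below is a minimal sketch of what the call sites imply, assuming the package already defines `models.TypeCloudBrainOne` and a second constant `models.TypeCloudBrainTwo` (only the first is confirmed by this patch):

```go
package repo

import (
	"fmt"

	"code.gitea.io/gitea/models"
)

// checkTypeCloudBrain validates the cloud-brain backend selector passed to
// the attachment routes; anything but the two known backends is an error.
// models.TypeCloudBrainTwo is an assumption, not shown in this patch.
func checkTypeCloudBrain(typeCloudBrain int) error {
	if typeCloudBrain != models.TypeCloudBrainOne && typeCloudBrain != models.TypeCloudBrainTwo {
		return fmt.Errorf("invalid cloud brain type: %d", typeCloudBrain)
	}
	return nil
}
```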
diff --git a/templates/repo/datasets/index.tmpl b/templates/repo/datasets/index.tmpl index 2da6ca8bf..c8588ef80 100755 --- a/templates/repo/datasets/index.tmpl +++ b/templates/repo/datasets/index.tmpl @@ -50,6 +50,7 @@
+
@@ -118,6 +119,8 @@
+ + + -// async function completeUpload() { -// return await axios.post( -// '/attachments/complete_multipart', -// qs.stringify({ -// uuid: file.uuid, -// uploadID: file.uploadID, -// file_name: file.name, -// size: file.size, -// dataset_id: file.datasetId, -// type: cloud_brain_type, -// _csrf: csrf -// }) -// ); -// } + + \ No newline at end of file From 2b7600c337c9554f88383d6e53ee62d6c8443b45 Mon Sep 17 00:00:00 2001 From: Gitea Date: Mon, 1 Feb 2021 19:50:52 +0800 Subject: [PATCH 08/36] debug obsclient --- templates/repo/datasets/index.tmpl | 135 --------------- web_src/js/components/MinioUploader.vue | 283 +++++++++++++------------------- webpack.config.js | 3 + 3 files changed, 119 insertions(+), 302 deletions(-) mode change 100644 => 100755 webpack.config.js diff --git a/templates/repo/datasets/index.tmpl b/templates/repo/datasets/index.tmpl index 6a49d7cba..9b8e46062 100755 --- a/templates/repo/datasets/index.tmpl +++ b/templates/repo/datasets/index.tmpl @@ -1,32 +1,4 @@ - {{template "base/head" .}} -
{{template "repo/header" .}} @@ -106,14 +78,6 @@
- - - - - - {{if .Permission.CanWrite $.UnitTypeDatasets}}
{{template "base/footer" .}} - - - - \ No newline at end of file diff --git a/web_src/js/components/MinioUploader.vue b/web_src/js/components/MinioUploader.vue index d87c28949..fdd9ed50c 100755 --- a/web_src/js/components/MinioUploader.vue +++ b/web_src/js/components/MinioUploader.vue @@ -4,11 +4,6 @@ id="dataset" class="dropzone" /> -

{{ file_status_text }} @@ -25,6 +20,8 @@ import SparkMD5 from 'spark-md5'; import axios from 'axios'; import qs from 'qs'; +// import esdk-obs-nodejs from 'esdk-obs-nodejs'; +// import * as ObsClient from 'esdk-obs-nodejs' import createDropzone from '../features/dropzone.js'; const {_AppSubUrl, _StaticUrlPrefix, csrf} = window.config; @@ -72,81 +69,9 @@ export default { previewTemplate += '

\n'; previewTemplate += '
'; - // var fileArr = new Array(); - // jQuery(function($){ - // Dropzone.autoDiscover = true; - // Dropzone.options.myAwesomeDropzone = false; - // try { - // $(".dropzone").dropzone({ - // url:"/todouploader", - // method:"post", - // paramName:"file", - // autoProcessQueue:true,//自动上传 - // maxFilesize:1 * 1024 * 1024 * 1024 * 1024, // MB - // acceptedFiles:"*/*", - // dictInvalidFileType:"无效的文件类型", - // addRemoveLinks:true, - // maxFiles: 1, //指的是上传目录下的最大文件数 - // // dictRemoveFile:"移除文件", - // dictDefaultMessage: - // "拖动文件上传\ - // (或者点击上传)
\ - // ", - // dictResponseError:"文件上传失败!", - // dictFileTooBig:"文件过大,上传失败!", - // previewTemplate: "
[dropzone preview-template markup lost in extraction; only the status labels 上传成功 ("upload succeeded") and 上传失败 ("upload failed") survive]
", - // init:function(){ - // this.on("addedfile",function(file,data) { - // fileArr.push(file.upload.uuid); - // //解决点击时重复发送请求 - // $(".dz-remove").each(function(index) { - // if(!$(".dz-remove:eq(" + index + ")").attr("id")) { - // $(".dz-remove:eq(" + index + ")").attr("id",fileArr[index]); - // } - // }) - // }), - - // this.on("success",function(file,data){ - // //var myDropzone = this; - // $("#" + file.upload.uuid).click(function() { - // var fileName = $(this).parent().find(".dz-filename").text(); - // console.log(fileName ) - // }) - // }); - - // this.on("complete",function(file) { - // if(file.status == "canceled" || file.status == "error") { - // var fileName = $("#" + file.upload.uuid).parent().find(".dz-filename").text(); - // // setTimeout(function() { - // // $.ajax({ - // // type:"POST", - // // url:"${pageContext.request.contextPath}/uploadController/delete.action", - // // data:{"fileName":fileName}, - // // dataType:"json", - // // success:function(data){ - // // if(data == "success") { - // // // alert("删除成功"); - // // } - // // }, - // // error:function(ajax) { - // // alert(ajax.status); - // // } - // // }) - // // },2000); - // } - // }) - - // } - // }); - // } catch(e) { - // alert('Dropzone.js does not support older browsers!'); - // } - // }); - - - const $dropzone = $('div#dataset'); console.log('createDropzone'); + const dropzoneUploader = await createDropzone($dropzone[0], { url: '/todouploader', maxFiles: this.maxFiles, @@ -157,14 +82,31 @@ export default { dictInvalidFileType: this.dropzoneParams.data('invalid-input-type'), dictFileTooBig: this.dropzoneParams.data('file-too-big'), dictRemoveFile: this.dropzoneParams.data('remove-file'), - previewTemplate + previewTemplate, }); + + // 文件发送前调用 + dropzoneUploader.on('sending', (file, xhr, formData) => { + console.log(xhr) + console.log(formData) + }); + + // 文件复制后触发 dropzoneUploader.on('addedfile', (file) => { - setTimeout(() => { - // eslint-disable-next-line no-unused-expressions - file.accepted && this.onFileAdded(file); - }, 200); + console.log("file",file) + if(file.status == 'added'){ + this.onFileAdded(file) + } }); + + dropzoneUploader.on('success', (file, res) => { + this.emitDropzoneSuccess(file) + }); + + dropzoneUploader.on('totaluploadprogress', function(file, progress){ + this.updateProgress(file, progress) + }); + dropzoneUploader.on('maxfilesexceeded', function (file) { if (this.files[0].status !== 'success') { alert(this.dropzoneParams.data('waitting-uploading')); @@ -188,107 +130,114 @@ export default { '.dz-upload' ).style.width = `${progress}%`; }, + emitDropzoneSuccess(file) { + file.status = 'success'; + this.dropzoneUploader.emit('success', file); + this.dropzoneUploader.emit('complete', file); + this.finishUpload(file) + }, + emitDropzoneFailed(file) { + this.status = this.dropzoneParams.data('falied'); + file.status = 'error'; + this.dropzoneUploader.emit('error', file); + // this.dropzoneUploader.emit('complete', file); + }, onFileAdded(file) { - file.datasetId = document - .getElementById('datasetId') - .getAttribute('datasetId'); + // file.datasetId = document + // .getElementById('datasetId') + // .getAttribute('datasetId'); + console.log("执行到我了") this.resetStatus(); - // this.computeMD5(file); - }, + this.status = this.dropzoneParams.data('obs-connectting'); + + // 引入obs库 + var ObsClient = require('esdk-obs-nodejs'); - // finishUpload(file) { - // this.emitDropzoneSuccess(file); - // setTimeout(() => { - // window.location.reload(); - // }, 1000); - // }, + // 创建ObsClient实例 + var 
obsClient = new ObsClient({ + access_key_id: 'FDP3LRMHLB9S77VWEHE3', + secret_access_key: 'LyM82Wk80pgjhs2z7AdDcsdpCWhbsJtSzQ7hkESN', + server : 'https://112.95.163.82' + }); + console.log("obsClient", obsClient) - fileLoaded(e) { - this.updateProgress(file, ((currentChunk / chunks) * 100).toFixed(2)); - }, - - // async computeMD5Success(md5edFile) { - // async function addAttachment(file) { - // return await axios.post( - // '/attachments/add', - // qs.stringify({ - // uuid: file.uuid, - // file_name: file.name, - // size: file.size, - // dataset_id: file.datasetId, - // _csrf: csrf - // }) - // ); - // } - // }, + // 创建桶 + // obsClient.createBucket({ + // Bucket : 'bucketname' + // }, (err, result) => { + // if(err){ + // console.error('Error-->' + err); + // this.emitDropzoneFailed(file) + // }else{ + // console.log('Status-->' + result.CommonMsg.Status); + // } + // }); - - async newMultiUpload(file) { - const res = await axios.get('/attachments/new_multipart', { - params: { - totalChunkCounts: file.totalChunkCounts, - md5: file.uniqueIdentifier, - size: file.size, - fileType: file.type, - _csrf: csrf + obsClient.uploadFile({ + Bucket : 'bucketname', + Key : this.get_result().key, + // 设置待上传的本地文件,localfile为待上传的本地文件路径,需要指定到具体的文件名 + UploadFile : file.name, + // 设置分段大小为10MB + PartSize : 10 * 1024 * 1024, + // 开启断点续传模式 + EnableCheckpoint : true + }, (err, result) => { + if(err){ + console.error('Error-->' + err); + this.emitDropzoneFailed(file) + }else{ + console.log('RequestId-->' + result.InterfaceResult.RequestId); + console.log('Bucket-->' + result.InterfaceResult.Bucket); + console.log('Key-->' + result.InterfaceResult.Key); + console.log('Location-->' + result.InterfaceResult.Location); + this.emitDropzoneSuccess(file); } }); - file.uploadID = res.data.uploadID; - file.uuid = res.data.uuid; }, - multipartUpload(file) { - async function updateChunk(currentChunk) { - await axios.post( - '/attachments/update_chunk', - qs.stringify({ - uuid: file.uuid, - chunkNumber: currentChunk + 1, - etag: etags[currentChunk], - _csrf: csrf - }) - ); - } - - async function completeUpload() { - return await axios.post( - '/attachments/complete_multipart', - qs.stringify({ - uuid: file.uuid, - uploadID: file.uploadID, - file_name: file.name, - size: file.size, - dataset_id: file.datasetId, - _csrf: csrf - }) - ); - } - - const successChunks = []; - let successParts = []; - successParts = file.chunks.split(','); - for (let i = 0; i < successParts.length; i++) { - successChunks[i] = successParts[i].split('-')[0]; - } + get_result(){ + var res + $.ajax({ + url: '/attachments/get_obs_key', + type: 'GET', + async: false, + success: function(result){ + res = result + } + }); + console.log("res=", res) + return res + }, + finishUpload(file) { + $.ajax({ + url: '/attachments/add', + type: 'POST', + data: { + 'uuid': get_result().uuid, + 'file_name': filepath.split('/')[-1], + 'size': file.size, + 'dataset_id': file.datasetId, + '_csrf': csrf, + 'type': 1 + }, + async: false, + success: function (data) { + console.log(data) + } + }) + setTimeout(() => { + window.location.reload(); + }, 1000); } + } }; + + +
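Note: this debug revision drives OBS directly from the browser with `obsClient.uploadFile`, which bundles multipart upload, a 10 MB part size, and checkpoint-based resume into one call. The vendored Go SDK under `modules/obs` exposes the same resumable API server-side. A minimal sketch, assuming the vendored package keeps the upstream esdk-obs-go signatures (`obs.New`, `UploadFileInput`, `EnableCheckpoint`) and using placeholder credentials:

```go
package main

import (
	"fmt"
	"log"

	"code.gitea.io/gitea/modules/obs"
)

func main() {
	// Placeholder endpoint and credentials; the real values come from the
	// [obs] section of app.ini.
	client, err := obs.New("AK", "SK", "https://obs.example.com")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	input := &obs.UploadFileInput{}
	input.Bucket = "bucketname"
	input.Key = "attachment/0/1/uuid"
	input.UploadFile = "/tmp/dataset.zip" // local file to upload
	input.PartSize = 10 * 1024 * 1024     // 10 MB parts, matching the JS call above
	input.EnableCheckpoint = true         // persist a checkpoint file so retries resume

	output, err := client.UploadFile(input)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("uploaded to", output.Location)
}
```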
+
+
+
+
+
+
+
+
+ + +
+ +
+ {{template "repo/header" .}} + +
+ + +
+
+

{{.i18n.Tr "repo.cloudbrain2"}}

+
+ +
+
+ +
+ {{if .Permission.CanWrite $.UnitTypeCloudBrain}} + {{.i18n.Tr "repo.cloudbrain.new"}} {{end}} +
+
+ + +
+ + +
+
+
+ + +
+
+
+
+
+ +
+
+
+ + +
+ {{range .Tasks}} +
+
+ + + + + +
+ {{.Status}} +
+ + +
+ {{svg "octicon-flame" 16}} {{TimeSinceUnix .CreatedUnix $.Lang}} +
+ + +
+ + + 查看 + + +
+ + +
+ +
+ + +
+
+ + {{$.CsrfTokenHtml}} + 删除 + +
+
+ + +
+ +
+ + +
+
+
+ {{$.CsrfTokenHtml}} + 停止 +
+
+
+ + + + 提交镜像 + + + + +
+
+ {{end}} {{template "base/paginate" .}} +
+ +
+
+
+ +
+ +
+
+ + + + +
+ +
+ + +{{template "base/footer" .}} + + \ No newline at end of file diff --git a/templates/repo/modelarts/new.tmpl b/templates/repo/modelarts/new.tmpl new file mode 100755 index 000000000..d5d2680a4 --- /dev/null +++ b/templates/repo/modelarts/new.tmpl @@ -0,0 +1,181 @@ +{{template "base/head" .}} + + +
+
+
+
+
+
+
+
+
+
+ {{template "repo/header" .}} +
+
+ {{template "base/alert" .}} +
+ {{.CsrfTokenHtml}} +

+ {{.i18n.Tr "repo.cloudbrain.new"}} +

+
+ +
+ + +
+ +
+ + + +
+ +
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + + {{.i18n.Tr "repo.cloudbrain.cancel"}} +
+
+
+
+
+
+{{template "base/footer" .}} + + \ No newline at end of file diff --git a/templates/repo/modelarts/show.tmpl b/templates/repo/modelarts/show.tmpl new file mode 100755 index 000000000..fe4ec7ab3 --- /dev/null +++ b/templates/repo/modelarts/show.tmpl @@ -0,0 +1,104 @@ +{{template "base/head" .}} +
+{{template "repo/header" .}} +
+
+ {{template "base/alert" .}} + +

+ 返回 +

+
+
+ {{with .task}} +

任务名称: {{.JobName}}

+ {{end}} +
+
+

任务结果:

+ {{with .taskRes}} + {{range .TaskStatuses}} + + + + + + + + + + + + + + + + + + + + + + + +
状态 {{.State}}
开始时间 {{.StartTime}}
结束时间 {{.FinishedTime}}
ExitCode {{.ExitCode}}
退出信息 {{.ExitDiagnostics| nl2br}}
+ {{end}} + {{end}} +
+
+ {{with .result}} + + + + + + + + + + + + + + + + + + +
硬件信息
CPU {{.Resource.CPU}}
Memory {{.Resource.Memory}}
NvidiaComGpu {{.Resource.NvidiaComGpu}}
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
调试信息
状态 {{.Platform}}
开始时间 {{.JobStatus.StartTime}}
结束时间 {{.JobStatus.EndTime}}
ExitCode {{.JobStatus.AppExitCode}}
退出信息 {{.JobStatus.AppExitDiagnostics | nl2br}}
+ {{end}} +
+
+ +
+
+
+{{template "base/footer" .}} diff --git a/web_src/js/components/MinioUploader.vue b/web_src/js/components/MinioUploader.vue index e637d83c7..9083dd560 100755 --- a/web_src/js/components/MinioUploader.vue +++ b/web_src/js/components/MinioUploader.vue @@ -155,7 +155,7 @@ export default { var obsClient = new ObsClient({ access_key_id: 'FDP3LRMHLB9S77VWEHE3', secret_access_key: 'LyM82Wk80pgjhs2z7AdDcsdpCWhbsJtSzQ7hkESN', - server : 'https://112.95.163.82' + server : 'https://obs.cn-south-222.ai.pcl.cn' }); var cp; From 6378d27dfeba24f20603cd2819e54a181aeb921f Mon Sep 17 00:00:00 2001 From: Gitea Date: Tue, 2 Feb 2021 23:16:19 +0800 Subject: [PATCH 13/36] debug obs --- templates/base/head.tmpl | 1 + templates/repo/datasets/index.tmpl | 13 +- web_src/js/components/MinioUploader.vue | 288 --------------------- .../js/features/esdk-obs-browserjs-3.19.5.min.js | 6 - webpack.config.js | 6 +- 5 files changed, 6 insertions(+), 308 deletions(-) mode change 100644 => 100755 templates/base/head.tmpl delete mode 100755 web_src/js/features/esdk-obs-browserjs-3.19.5.min.js diff --git a/templates/base/head.tmpl b/templates/base/head.tmpl old mode 100644 new mode 100755 index cad2c976d..ce5d9892d --- a/templates/base/head.tmpl +++ b/templates/base/head.tmpl @@ -175,6 +175,7 @@ {{end}} {{template "custom/header" .}} + {{template "custom/body_outer_pre" .}} diff --git a/templates/repo/datasets/index.tmpl b/templates/repo/datasets/index.tmpl index 1c8841555..f2eaab004 100755 --- a/templates/repo/datasets/index.tmpl +++ b/templates/repo/datasets/index.tmpl @@ -1,7 +1,6 @@ {{template "base/head" .}} -
- {{template "repo/header" .}} + {{template "repo/header" .}}
@@ -78,7 +76,6 @@
- {{if .Permission.CanWrite $.UnitTypeDatasets}}
- - -{{template "base/footer" .}} - - - +{{template "base/footer" .}} \ No newline at end of file diff --git a/web_src/js/components/MinioUploader.vue b/web_src/js/components/MinioUploader.vue index e637d83c7..e69de29bb 100755 --- a/web_src/js/components/MinioUploader.vue +++ b/web_src/js/components/MinioUploader.vue @@ -1,288 +0,0 @@ - - - - - \ No newline at end of file diff --git a/web_src/js/features/esdk-obs-browserjs-3.19.5.min.js b/web_src/js/features/esdk-obs-browserjs-3.19.5.min.js deleted file mode 100755 index 7b05556a6..000000000 --- a/web_src/js/features/esdk-obs-browserjs-3.19.5.min.js +++ /dev/null @@ -1,6 +0,0 @@ -!function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var c="function"==typeof require&&require;if(!u&&c)return c(o,!0);if(i)return i(o,!0);var a=new Error("Cannot find module '"+o+"'");throw a.code="MODULE_NOT_FOUND",a}var f=n[o]={exports:{}};t[o][0].call(f.exports,function(n){var r=t[o][1][n];return s(r||n)},f,f.exports,e,t,n,r)}return n[o].exports}for(var i="function"==typeof require&&require,o=0;o2?arguments[2]:void 0,s=Math.min((void 0===f?u:i(f,u))-a,u-c),l=1;for(a0;)a in r?r[c]=r[a]:delete r[c],c+=l,a+=l;return r}},{114:114,118:118,119:119}],9:[function(t,n,r){"use strict";var e=t(119),i=t(114),o=t(118);n.exports=function fill(t){for(var n=e(this),r=o(n.length),u=arguments.length,c=i(u>1?arguments[1]:void 0,r),a=u>2?arguments[2]:void 0,f=void 0===a?r:i(a,r);f>c;)n[c++]=t;return n}},{114:114,118:118,119:119}],10:[function(t,n,r){var e=t(39);n.exports=function(t,n){var r=[];return e(t,!1,r.push,r,n),r}},{39:39}],11:[function(t,n,r){var e=t(117),i=t(118),o=t(114);n.exports=function(t){return function(n,r,u){var c,a=e(n),f=i(a.length),s=o(u,f);if(t&&r!=r){for(;f>s;)if((c=a[s++])!=c)return!0}else for(;f>s;s++)if((t||s in a)&&a[s]===r)return t||s||0;return!t&&-1}}},{114:114,117:117,118:118}],12:[function(t,n,r){var e=t(25),i=t(47),o=t(119),u=t(118),c=t(15);n.exports=function(t,n){var r=1==t,a=2==t,f=3==t,s=4==t,l=6==t,h=5==t||l,v=n||c;return function(n,c,p){for(var d,y,g=o(n),m=i(g),b=e(c,p,3),x=u(m.length),S=0,w=r?v(n,x):a?v(n,0):void 0;x>S;S++)if((h||S in m)&&(d=m[S],y=b(d,S,g),t))if(r)w[S]=y;else if(y)switch(t){case 3:return!0;case 5:return d;case 6:return S;case 2:w.push(d)}else if(s)return!1;return l?-1:f||s?s:w}}},{118:118,119:119,15:15,25:25,47:47}],13:[function(t,n,r){var e=t(3),i=t(119),o=t(47),u=t(118);n.exports=function(t,n,r,c,a){e(n);var f=i(t),s=o(f),l=u(f.length),h=a?l-1:0,v=a?-1:1;if(r<2)for(;;){if(h in s){c=s[h],h+=v;break}if(h+=v,a?h<0:l<=h)throw TypeError("Reduce of empty array with no initial value")}for(;a?h>=0:l>h;h+=v)h in s&&(c=n(c,s[h],h,f));return c}},{118:118,119:119,3:3,47:47}],14:[function(t,n,r){var e=t(51),i=t(49),o=t(128)("species");n.exports=function(t){var n;return i(t)&&(n=t.constructor,"function"!=typeof n||n!==Array&&!i(n.prototype)||(n=void 0),e(n)&&null===(n=n[o])&&(n=void 0)),void 0===n?Array:n}},{128:128,49:49,51:51}],15:[function(t,n,r){var e=t(14);n.exports=function(t,n){return new(e(t))(n)}},{14:14}],16:[function(t,n,r){"use strict";var e=t(3),i=t(51),o=t(46),u=[].slice,c={},a=function(t,n,r){if(!(n in c)){for(var e=[],i=0;i1?arguments[1]:void 0,3);r=r?r.n:this._f;)for(e(r.v,r.k,this);r&&r.r;)r=r.p},has:function has(t){return!!y(p(this,n),t)}}),h&&e(s.prototype,"size",{get:function(){return p(this,n)[d]}}),s},def:function(t,n,r){var e,i,o=y(t,n);return o?o.v=r:(t._l=o={i:i=v(n,!0),k:n,v:r,p:e=t._l,n:void 
0,r:!1},t._f||(t._f=o),e&&(e.n=o),t[d]++,"F"!==i&&(t._i[i]=o)),t},getEntry:y,setStrong:function(t,n,r){f(t,n,function(t,r){this._t=p(t,n),this._k=r,this._l=void 0},function(){for(var t=this,n=t._k,r=t._l;r&&r.r;)r=r.p;return t._t&&(t._l=r=r?r.n:t._t._f)?"keys"==n?s(0,r.k):"values"==n?s(0,r.v):s(0,[r.k,r.v]):(t._t=void 0,s(1))},r?"entries":"values",!r,!0),l(n)}}},{100:100,125:125,25:25,29:29,39:39,55:55,57:57,6:6,66:66,71:71,72:72,93:93}],20:[function(t,n,r){var e=t(17),i=t(10);n.exports=function(t){return function toJSON(){if(e(this)!=t)throw TypeError(t+"#toJSON isn't generic");return i(this)}}},{10:10,17:17}],21:[function(t,n,r){"use strict";var e=t(93),i=t(66).getWeak,o=t(7),u=t(51),c=t(6),a=t(39),f=t(12),s=t(41),l=t(125),h=f(5),v=f(6),p=0,d=function(t){return t._l||(t._l=new y)},y=function(){this.a=[]},g=function(t,n){return h(t.a,function(t){return t[0]===n})};y.prototype={get:function(t){var n=g(this,t);if(n)return n[1]},has:function(t){return!!g(this,t)},set:function(t,n){var r=g(this,t);r?r[1]=n:this.a.push([t,n])},delete:function(t){var n=v(this.a,function(n){return n[0]===t});return~n&&this.a.splice(n,1),!!~n}},n.exports={getConstructor:function(t,n,r,o){var f=t(function(t,e){c(t,f,n,"_i"),t._t=n,t._i=p++,t._l=void 0,void 0!=e&&a(e,r,t[o],t)});return e(f.prototype,{delete:function(t){if(!u(t))return!1;var r=i(t);return!0===r?d(l(this,n)).delete(t):r&&s(r,this._i)&&delete r[this._i]},has:function has(t){if(!u(t))return!1;var r=i(t);return!0===r?d(l(this,n)).has(t):r&&s(r,this._i)}}),f},def:function(t,n,r){var e=i(o(n),!0);return!0===e?d(t).set(n,r):e[t._i]=r,t},ufstore:d}},{12:12,125:125,39:39,41:41,51:51,6:6,66:66,7:7,93:93}],22:[function(t,n,r){"use strict";var e=t(40),i=t(33),o=t(94),u=t(93),c=t(66),a=t(39),f=t(6),s=t(51),l=t(35),h=t(56),v=t(101),p=t(45);n.exports=function(t,n,r,d,y,g){var m=e[t],b=m,x=y?"set":"add",S=b&&b.prototype,w={},_=function(t){var n=S[t];o(S,t,"delete"==t?function(t){return!(g&&!s(t))&&n.call(this,0===t?0:t)}:"has"==t?function has(t){return!(g&&!s(t))&&n.call(this,0===t?0:t)}:"get"==t?function get(t){return g&&!s(t)?void 0:n.call(this,0===t?0:t)}:"add"==t?function add(t){return n.call(this,0===t?0:t),this}:function set(t,r){return n.call(this,0===t?0:t,r),this})};if("function"==typeof b&&(g||S.forEach&&!l(function(){(new b).entries().next()}))){var E=new b,O=E[x](g?{}:-0,1)!=E,P=l(function(){E.has(1)}),M=h(function(t){new b(t)}),F=!g&&l(function(){for(var t=new b,n=5;n--;)t[x](n,n);return!t.has(-0)});M||(b=n(function(n,r){f(n,b,t);var e=p(new m,n,b);return void 0!=r&&a(r,y,e[x],e),e}),b.prototype=S,S.constructor=b),(P||F)&&(_("delete"),_("has"),y&&_("get")),(F||O)&&_(x),g&&S.clear&&delete S.clear}else b=d.getConstructor(n,t,y,x),u(b.prototype,r),c.NEED=!0;return v(b,t),w[t]=b,i(i.G+i.W+i.F*(b!=m),w),g||d.setStrong(b,t,y),b}},{101:101,33:33,35:35,39:39,40:40,45:45,51:51,56:56,6:6,66:66,93:93,94:94}],23:[function(t,n,r){var e=n.exports={version:"2.5.0"};"number"==typeof __e&&(__e=e)},{}],24:[function(t,n,r){"use strict";var e=t(72),i=t(92);n.exports=function(t,n,r){n in t?e.f(t,n,i(0,r)):t[n]=r}},{72:72,92:92}],25:[function(t,n,r){var e=t(3);n.exports=function(t,n,r){if(e(t),void 0===n)return t;switch(r){case 1:return function(r){return t.call(n,r)};case 2:return function(r,e){return t.call(n,r,e)};case 3:return function(r,e,i){return t.call(n,r,e,i)}}return function(){return t.apply(n,arguments)}}},{3:3}],26:[function(t,n,r){"use strict";var e=t(35),i=Date.prototype.getTime,o=Date.prototype.toISOString,u=function(t){return 
t>9?t:"0"+t};n.exports=e(function(){return"0385-07-25T07:06:39.999Z"!=o.call(new Date(-5e13-1))})||!e(function(){o.call(new Date(NaN))})?function toISOString(){if(!isFinite(i.call(this)))throw RangeError("Invalid time value");var t=this,n=t.getUTCFullYear(),r=t.getUTCMilliseconds(),e=n<0?"-":n>9999?"+":"";return e+("00000"+Math.abs(n)).slice(e?-6:-4)+"-"+u(t.getUTCMonth()+1)+"-"+u(t.getUTCDate())+"T"+u(t.getUTCHours())+":"+u(t.getUTCMinutes())+":"+u(t.getUTCSeconds())+"."+(r>99?r:"0"+u(r))+"Z"}:o},{35:35}],27:[function(t,n,r){"use strict";var e=t(7),i=t(120);n.exports=function(t){if("string"!==t&&"number"!==t&&"default"!==t)throw TypeError("Incorrect hint");return i(e(this),"number"!=t)}},{120:120,7:7}],28:[function(t,n,r){n.exports=function(t){if(void 0==t)throw TypeError("Can't call method on "+t);return t}},{}],29:[function(t,n,r){n.exports=!t(35)(function(){return 7!=Object.defineProperty({},"a",{get:function(){return 7}}).a})},{35:35}],30:[function(t,n,r){var e=t(51),i=t(40).document,o=e(i)&&e(i.createElement);n.exports=function(t){return o?i.createElement(t):{}}},{40:40,51:51}],31:[function(t,n,r){n.exports="constructor,hasOwnProperty,isPrototypeOf,propertyIsEnumerable,toLocaleString,toString,valueOf".split(",")},{}],32:[function(t,n,r){var e=t(81),i=t(78),o=t(82);n.exports=function(t){var n=e(t),r=i.f;if(r)for(var u,c=r(t),a=o.f,f=0;c.length>f;)a.call(t,u=c[f++])&&n.push(u);return n}},{78:78,81:81,82:82}],33:[function(t,n,r){var e=t(40),i=t(23),o=t(42),u=t(94),c=t(25),a=function(t,n,r){var f,s,l,h,v=t&a.F,p=t&a.G,d=t&a.S,y=t&a.P,g=t&a.B,m=p?e:d?e[n]||(e[n]={}):(e[n]||{}).prototype,b=p?i:i[n]||(i[n]={}),x=b.prototype||(b.prototype={});p&&(r=n);for(f in r)s=!v&&m&&void 0!==m[f],l=(s?m:r)[f],h=g&&s?c(l,e):y&&"function"==typeof l?c(Function.call,l):l,m&&u(m,f,l,t&a.U),b[f]!=l&&o(b,f,h),y&&x[f]!=l&&(x[f]=l)};e.core=i,a.F=1,a.G=2,a.S=4,a.P=8,a.B=16,a.W=32,a.U=64,a.R=128,n.exports=a},{23:23,25:25,40:40,42:42,94:94}],34:[function(t,n,r){var e=t(128)("match");n.exports=function(t){var n=/./;try{"/./"[t](n)}catch(r){try{return n[e]=!1,!"/./"[t](n)}catch(t){}}return!0}},{128:128}],35:[function(t,n,r){n.exports=function(t){try{return!!t()}catch(t){return!0}}},{}],36:[function(t,n,r){"use strict";var e=t(42),i=t(94),o=t(35),u=t(28),c=t(128);n.exports=function(t,n,r){var a=c(t),f=r(u,a,""[t]),s=f[0],l=f[1];o(function(){var n={};return n[a]=function(){return 7},7!=""[t](n)})&&(i(String.prototype,t,s),e(RegExp.prototype,a,2==n?function(t,n){return l.call(t,this,n)}:function(t){return l.call(t,this)}))}},{128:128,28:28,35:35,42:42,94:94}],37:[function(t,n,r){"use strict";var e=t(7);n.exports=function(){var t=e(this),n="";return t.global&&(n+="g"),t.ignoreCase&&(n+="i"),t.multiline&&(n+="m"),t.unicode&&(n+="u"),t.sticky&&(n+="y"),n}},{7:7}],38:[function(t,n,r){"use strict";function flattenIntoArray(t,n,r,a,f,s,l,h){for(var v,p,d=f,y=0,g=!!l&&u(l,h,3);y0)d=flattenIntoArray(t,n,v,o(v.length),d,s-1)-1;else{if(d>=9007199254740991)throw TypeError();t[d]=v}d++}y++}return d}var e=t(49),i=t(51),o=t(118),u=t(25),c=t(128)("isConcatSpreadable");n.exports=flattenIntoArray},{118:118,128:128,25:25,49:49,51:51}],39:[function(t,n,r){var e=t(25),i=t(53),o=t(48),u=t(7),c=t(118),a=t(129),f={},s={},r=n.exports=function(t,n,r,l,h){var v,p,d,y,g=h?function(){return t}:a(t),m=e(r,l,n?2:1),b=0;if("function"!=typeof g)throw TypeError(t+" is not iterable!");if(o(g)){for(v=c(t.length);v>b;b++)if((y=n?m(u(p=t[b])[0],p[1]):m(t[b]))===f||y===s)return y}else 
for(d=g.call(t);!(p=d.next()).done;)if((y=i(d,m,p.value,n))===f||y===s)return y};r.BREAK=f,r.RETURN=s},{118:118,129:129,25:25,48:48,53:53,7:7}],40:[function(t,n,r){var e=n.exports="undefined"!=typeof window&&window.Math==Math?window:"undefined"!=typeof self&&self.Math==Math?self:Function("return this")();"number"==typeof __g&&(__g=e)},{}],41:[function(t,n,r){var e={}.hasOwnProperty;n.exports=function(t,n){return e.call(t,n)}},{}],42:[function(t,n,r){var e=t(72),i=t(92);n.exports=t(29)?function(t,n,r){return e.f(t,n,i(1,r))}:function(t,n,r){return t[n]=r,t}},{29:29,72:72,92:92}],43:[function(t,n,r){var e=t(40).document;n.exports=e&&e.documentElement},{40:40}],44:[function(t,n,r){n.exports=!t(29)&&!t(35)(function(){return 7!=Object.defineProperty(t(30)("div"),"a",{get:function(){return 7}}).a})},{29:29,30:30,35:35}],45:[function(t,n,r){var e=t(51),i=t(99).set;n.exports=function(t,n,r){var o,u=n.constructor;return u!==r&&"function"==typeof u&&(o=u.prototype)!==r.prototype&&e(o)&&i&&i(t,o),t}},{51:51,99:99}],46:[function(t,n,r){n.exports=function(t,n,r){var e=void 0===r;switch(n.length){case 0:return e?t():t.call(r);case 1:return e?t(n[0]):t.call(r,n[0]);case 2:return e?t(n[0],n[1]):t.call(r,n[0],n[1]);case 3:return e?t(n[0],n[1],n[2]):t.call(r,n[0],n[1],n[2]);case 4:return e?t(n[0],n[1],n[2],n[3]):t.call(r,n[0],n[1],n[2],n[3])}return t.apply(r,n)}},{}],47:[function(t,n,r){var e=t(18);n.exports=Object("z").propertyIsEnumerable(0)?Object:function(t){return"String"==e(t)?t.split(""):Object(t)}},{18:18}],48:[function(t,n,r){var e=t(58),i=t(128)("iterator"),o=Array.prototype;n.exports=function(t){return void 0!==t&&(e.Array===t||o[i]===t)}},{128:128,58:58}],49:[function(t,n,r){var e=t(18);n.exports=Array.isArray||function isArray(t){return"Array"==e(t)}},{18:18}],50:[function(t,n,r){var e=t(51),i=Math.floor;n.exports=function isInteger(t){return!e(t)&&isFinite(t)&&i(t)===t}},{51:51}],51:[function(t,n,r){n.exports=function(t){return"object"==typeof t?null!==t:"function"==typeof t}},{}],52:[function(t,n,r){var e=t(51),i=t(18),o=t(128)("match");n.exports=function(t){var n;return e(t)&&(void 0!==(n=t[o])?!!n:"RegExp"==i(t))}},{128:128,18:18,51:51}],53:[function(t,n,r){var e=t(7);n.exports=function(t,n,r,i){try{return i?n(e(r)[0],r[1]):n(r)}catch(n){var o=t.return;throw void 0!==o&&e(o.call(t)),n}}},{7:7}],54:[function(t,n,r){"use strict";var e=t(71),i=t(92),o=t(101),u={};t(42)(u,t(128)("iterator"),function(){return this}),n.exports=function(t,n,r){t.prototype=e(u,{next:i(1,r)}),o(t,n+" Iterator")}},{101:101,128:128,42:42,71:71,92:92}],55:[function(t,n,r){"use strict";var e=t(60),i=t(33),o=t(94),u=t(42),c=t(41),a=t(58),f=t(54),s=t(101),l=t(79),h=t(128)("iterator"),v=!([].keys&&"next"in[].keys()),p=function(){return this};n.exports=function(t,n,r,d,y,g,m){f(r,n,d);var b,x,S,w=function(t){if(!v&&t in P)return P[t];switch(t){case"keys":return function keys(){return new r(this,t)};case"values":return function values(){return new r(this,t)}}return function entries(){return new r(this,t)}},_=n+" Iterator",E="values"==y,O=!1,P=t.prototype,M=P[h]||P["@@iterator"]||y&&P[y],F=M||w(y),I=y?E?w("entries"):F:void 0,A="Array"==n?P.entries||M:M;if(A&&(S=l(A.call(new t)))!==Object.prototype&&S.next&&(s(S,_,!0),e||c(S,h)||u(S,h,p)),E&&M&&"values"!==M.name&&(O=!0,F=function values(){return M.call(this)}),e&&!m||!v&&!O&&P[h]||u(P,h,F),a[n]=F,a[_]=p,y)if(b={values:E?F:w("values"),keys:g?F:w("keys"),entries:I},m)for(x in b)x in P||o(P,x,b[x]);else i(i.P+i.F*(v||O),n,b);return 
b}},{101:101,128:128,33:33,41:41,42:42,54:54,58:58,60:60,79:79,94:94}],56:[function(t,n,r){var e=t(128)("iterator"),i=!1;try{var o=[7][e]();o.return=function(){i=!0},Array.from(o,function(){throw 2})}catch(t){}n.exports=function(t,n){if(!n&&!i)return!1;var r=!1;try{var o=[7],u=o[e]();u.next=function(){return{done:r=!0}},o[e]=function(){return u},t(o)}catch(t){}return r}},{128:128}],57:[function(t,n,r){n.exports=function(t,n){return{value:n,done:!!t}}},{}],58:[function(t,n,r){n.exports={}},{}],59:[function(t,n,r){var e=t(81),i=t(117);n.exports=function(t,n){for(var r,o=i(t),u=e(o),c=u.length,a=0;c>a;)if(o[r=u[a++]]===n)return r}},{117:117,81:81}],60:[function(t,n,r){n.exports=!1},{}],61:[function(t,n,r){var e=Math.expm1;n.exports=!e||e(10)>22025.465794806718||e(10)<22025.465794806718||-2e-17!=e(-2e-17)?function expm1(t){return 0==(t=+t)?t:t>-1e-6&&t<1e-6?t+t*t/2:Math.exp(t)-1}:e},{}],62:[function(t,n,r){var e=t(65),i=Math.pow,o=i(2,-52),u=i(2,-23),c=i(2,127)*(2-u),a=i(2,-126),f=function(t){return t+1/o-1/o};n.exports=Math.fround||function fround(t){var n,r,i=Math.abs(t),s=e(t);return ic||r!=r?s*(1/0):s*r)}},{65:65}],63:[function(t,n,r){n.exports=Math.log1p||function log1p(t){return(t=+t)>-1e-8&&t<1e-8?t-t*t/2:Math.log(1+t)}},{}],64:[function(t,n,r){n.exports=Math.scale||function scale(t,n,r,e,i){return 0===arguments.length||t!=t||n!=n||r!=r||e!=e||i!=i?NaN:t===1/0||t===-1/0?t:(t-n)*(i-e)/(r-n)+e}},{}],65:[function(t,n,r){n.exports=Math.sign||function sign(t){return 0==(t=+t)||t!=t?t:t<0?-1:1}},{}],66:[function(t,n,r){var e=t(124)("meta"),i=t(51),o=t(41),u=t(72).f,c=0,a=Object.isExtensible||function(){return!0},f=!t(35)(function(){return a(Object.preventExtensions({}))}),s=function(t){u(t,e,{value:{i:"O"+ ++c,w:{}}})},l=function(t,n){if(!i(t))return"symbol"==typeof t?t:("string"==typeof t?"S":"P")+t;if(!o(t,e)){if(!a(t))return"F";if(!n)return"E";s(t)}return t[e].i},h=function(t,n){if(!o(t,e)){if(!a(t))return!0;if(!n)return!1;s(t)}return t[e].w},v=function(t){return f&&p.NEED&&a(t)&&!o(t,e)&&s(t),t},p=n.exports={KEY:e,NEED:!1,fastKey:l,getWeak:h,onFreeze:v}},{124:124,35:35,41:41,51:51,72:72}],67:[function(t,n,r){var e=t(160),i=t(33),o=t(103)("metadata"),u=o.store||(o.store=new(t(266))),c=function(t,n,r){var i=u.get(t);if(!i){if(!r)return;u.set(t,i=new e)}var o=i.get(n);if(!o){if(!r)return;i.set(n,o=new e)}return o},a=function(t,n,r){var e=c(n,r,!1);return void 0!==e&&e.has(t)},f=function(t,n,r){var e=c(n,r,!1);return void 0===e?void 0:e.get(t)},s=function(t,n,r,e){c(r,e,!0).set(t,n)},l=function(t,n){var r=c(t,n,!1),e=[];return r&&r.forEach(function(t,n){e.push(n)}),e},h=function(t){return void 0===t||"symbol"==typeof t?t:String(t)},v=function(t){i(i.S,"Reflect",t)};n.exports={store:u,map:c,has:a,get:f,set:s,keys:l,key:h,exp:v}},{103:103,160:160,266:266,33:33}],68:[function(t,n,r){var e=t(40),i=t(113).set,o=e.MutationObserver||e.WebKitMutationObserver,u=e.process,c=e.Promise,a="process"==t(18)(u);n.exports=function(){var t,n,r,f=function(){var e,i;for(a&&(e=u.domain)&&e.exit();t;){i=t.fn,t=t.next;try{i()}catch(e){throw t?r():n=void 0,e}}n=void 0,e&&e.enter()};if(a)r=function(){u.nextTick(f)};else if(o){var s=!0,l=document.createTextNode("");new o(f).observe(l,{characterData:!0}),r=function(){l.data=s=!s}}else if(c&&c.resolve){var h=c.resolve();r=function(){h.then(f)}}else r=function(){i.call(e,f)};return function(e){var i={fn:e,next:void 0};n&&(n.next=i),t||(t=i,r()),n=i}}},{113:113,18:18,40:40}],69:[function(t,n,r){"use strict";function PromiseCapability(t){var n,r;this.promise=new 
t(function(t,e){if(void 0!==n||void 0!==r)throw TypeError("Bad Promise constructor");n=t,r=e}),this.resolve=e(n),this.reject=e(r)}var e=t(3);n.exports.f=function(t){return new PromiseCapability(t)}},{3:3}],70:[function(t,n,r){"use strict";var e=t(81),i=t(78),o=t(82),u=t(119),c=t(47),a=Object.assign;n.exports=!a||t(35)(function(){var t={},n={},r=Symbol(),e="abcdefghijklmnopqrst";return t[r]=7,e.split("").forEach(function(t){n[t]=t}),7!=a({},t)[r]||Object.keys(a({},n)).join("")!=e})?function assign(t,n){for(var r=u(t),a=arguments.length,f=1,s=i.f,l=o.f;a>f;)for(var h,v=c(arguments[f++]),p=s?e(v).concat(s(v)):e(v),d=p.length,y=0;d>y;)l.call(v,h=p[y++])&&(r[h]=v[h]);return r}:a},{119:119,35:35,47:47,78:78,81:81,82:82}],71:[function(t,n,r){var e=t(7),i=t(73),o=t(31),u=t(102)("IE_PROTO"),c=function(){},a=function(){var n,r=t(30)("iframe"),e=o.length;for(r.style.display="none",t(43).appendChild(r),r.src="javascript:",n=r.contentWindow.document,n.open(),n.write(" From ea916704e1492221e250fa86ef51ff6c74658414 Mon Sep 17 00:00:00 2001 From: yuyuanshifu <747342561@qq.com> Date: Fri, 5 Feb 2021 18:49:23 +0800 Subject: [PATCH 22/36] add attachment --- modules/storage/obs.go | 2 ++ routers/repo/attachment.go | 2 +- routers/repo/modelarts.go | 3 +-- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/modules/storage/obs.go b/modules/storage/obs.go index c80b9612f..86f98590d 100755 --- a/modules/storage/obs.go +++ b/modules/storage/obs.go @@ -16,6 +16,7 @@ import ( ) //check if has the object +//todo:修改查询方式 func ObsHasObject(path string) (bool, error) { hasObject := false output, err := ObsCli.ListObjects(&obs.ListObjectsInput{Bucket:setting.Bucket}) @@ -25,6 +26,7 @@ func ObsHasObject(path string) (bool, error) { } for _, obj := range output.Contents { + //obj.Key:attachment/0/1/019fd24e-4ef7-41cc-9f85-4a7b8504d958 if path == obj.Key { hasObject = true break diff --git a/routers/repo/attachment.go b/routers/repo/attachment.go index 2be78a2e0..77b6fff85 100755 --- a/routers/repo/attachment.go +++ b/routers/repo/attachment.go @@ -292,7 +292,7 @@ func AddAttachment(ctx *context.Context) { return } } else { - has, err = storage.ObsHasObject(models.AttachmentRelativePath(uuid)) + has, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(uuid)) if err != nil { ctx.ServerError("ObsHasObject", err) return diff --git a/routers/repo/modelarts.go b/routers/repo/modelarts.go index cf12eaece..40b985110 100755 --- a/routers/repo/modelarts.go +++ b/routers/repo/modelarts.go @@ -49,9 +49,8 @@ func ModelArtsIndex(ctx *context.Context) { return } - timestamp := time.Now().Unix() for i, task := range ciTasks { - if task.Status == string(models.JobRunning) && (timestamp-int64(task.CreatedUnix) > 30) { + if task.Status == string(models.JobRunning) { ciTasks[i].CanDebug = true } else { ciTasks[i].CanDebug = false From 56237691ea67d40a02de6500b886cc3b825ab014 Mon Sep 17 00:00:00 2001 From: Gitea Date: Fri, 5 Feb 2021 18:54:05 +0800 Subject: [PATCH 23/36] update --- web_src/js/components/MinioUploader.vue | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/web_src/js/components/MinioUploader.vue b/web_src/js/components/MinioUploader.vue index b0f80bc82..644b879a5 100755 --- a/web_src/js/components/MinioUploader.vue +++ b/web_src/js/components/MinioUploader.vue @@ -122,14 +122,12 @@ export default { file.status = 'success'; this.dropzoneUploader.emit('success', file); this.dropzoneUploader.emit('complete', file); - // this.finishUpload(file) - }, - 
emitDropzoneFailed(file) { - this.status = this.dropzoneParams.data('falied'); - file.status = 'error'; - this.dropzoneUploader.emit('error', file); - // this.dropzoneUploader.emit('complete', file); }, + // emitDropzoneFailed(file) { + // this.status = this.dropzoneParams.data('falied'); + // file.status = 'error'; + // this.dropzoneUploader.emit('error', file); + // }, onFileAdded(file) { this.resetStatus(); @@ -137,7 +135,7 @@ export default { this.do_multi_uploader(file) }, - // 获取key + // 获取key, uuid get_result(){ var res $.ajax({ @@ -162,13 +160,13 @@ export default { // 断点续传 do_multi_uploader(file){ - console.log("file = ", file) + const result = this.get_result() - const _this = this const upload_datasetId = document .getElementById('datasetId') .getAttribute('datasetId'); const obsClient = this.getObsClient() + const _this = this var cp; var hook; @@ -192,6 +190,7 @@ export default { // 文件上传成功 if(eventType == 'completeMultipartUploadSucceed'){ + console.log("file = ", file) $.ajax({ url: '/attachments/add', type: 'POST', @@ -206,12 +205,12 @@ export default { async: false, success: function (data) { _this.progress = 100; - _this.status = this.dropzoneParams.data('upload-complete'); + _this.status = _this.dropzoneParams.data('upload-complete'); _this.emitDropzoneSuccess(file) }, error: function(){ console.log("发送/attachments/add的post错误1") - _this.emitDropzoneFailed(file) + // _this.emitDropzoneFailed(file) } }); @@ -254,16 +253,16 @@ export default { async: false, success: function (data) { _this.progress = 100; - _this.status = this.dropzoneParams.data('upload-complete'); - _this.emitDropzoneSuccess(file) + _this.status = _this.dropzoneParams.data('upload-complete'); + // _this.emitDropzoneSuccess(file) }, error: function(){ - _this.emitDropzoneFailed(file) + // _this.emitDropzoneFailed(file) console.log("发送/attachments/add的post错误2") } }); }else if (eventType == 'uploadPartFailed'){ - _this.emitDropzoneFailed(file) + // _this.emitDropzoneFailed(file) console.log("经过断点上传之后还是不能上传成功,该分段上传失败") } } From b355f51354615e534cd2a465509d8cff660d1415 Mon Sep 17 00:00:00 2001 From: Gitea Date: Sat, 6 Feb 2021 17:00:29 +0800 Subject: [PATCH 24/36] add obs uploader. --- options/locale/locale_zh-CN.ini | 3 + templates/repo/datasets/index.tmpl | 20 +- templates/repo/header.tmpl | 35 ++- web_src/js/components/MinioUploader.vue | 459 +++++++++++++++++++++----------- web_src/js/components/ObsUploader.vue | 298 +++++++++++++++++++++ web_src/js/index.js | 17 ++ 6 files changed, 667 insertions(+), 165 deletions(-) create mode 100755 web_src/js/components/ObsUploader.vue diff --git a/options/locale/locale_zh-CN.ini b/options/locale/locale_zh-CN.ini index 4d6295370..fec135a7f 100755 --- a/options/locale/locale_zh-CN.ini +++ b/options/locale/locale_zh-CN.ini @@ -758,6 +758,9 @@ balance.total_view=余额总览 balance.available=可用余额: cloudbrain1=云脑1 cloudbrain2=云脑2 +cloudbrain_selection=云脑选择 +cloudbrain_platform_selection=选择您准备使用的云脑平台: +confirm_choice=确定 template.items=模板选项 template.git_content=Git数据(默认分支) diff --git a/templates/repo/datasets/index.tmpl b/templates/repo/datasets/index.tmpl index e7fc7516c..dc3b82e4e 100755 --- a/templates/repo/datasets/index.tmpl +++ b/templates/repo/datasets/index.tmpl @@ -1,6 +1,18 @@ {{template "base/head" .}}
-	{{template "repo/header" .}}
+	{{template "repo/header" .}}
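The todo added to ObsHasObject in patch 22 ("change the query method") points at a real cost: the helper lists every object in the bucket to test for a single key. A direct metadata lookup avoids the full listing. The following is a minimal sketch only; it assumes the vendored OBS SDK exposes GetObjectMetadata and reports a missing key as an ObsError carrying HTTP status 404, and the function name obsHasObjectDirect plus the import paths are illustrative, not part of these patches.

package storage

import (
	"code.gitea.io/gitea/modules/obs"
	"code.gitea.io/gitea/modules/setting"
)

// obsHasObjectDirect checks a single key with one metadata request instead
// of paging through the whole bucket. Sketch only: ObsCli is the client
// already defined in modules/storage/obs.go.
func obsHasObjectDirect(key string) (bool, error) {
	_, err := ObsCli.GetObjectMetadata(&obs.GetObjectMetadataInput{
		Bucket: setting.Bucket,
		Key:    key,
	})
	if err == nil {
		// The metadata call succeeded, so the object exists.
		return true, nil
	}
	// Assumption: the SDK surfaces a missing key as an ObsError with a 404
	// status; anything else is a genuine failure.
	if obsErr, ok := err.(obs.ObsError); ok && obsErr.StatusCode == 404 {
		return false, nil
	}
	return false, err
}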
From 596bbdfa58dd6f8ac2dc38ea9bfb3da7178022bb Mon Sep 17 00:00:00 2001 From: yuyuanshifu <747342561@qq.com> Date: Sun, 7 Feb 2021 17:55:28 +0800 Subject: [PATCH 28/36] obs key --- routers/repo/attachment.go | 4 ++++ web_src/js/components/ObsUploader.vue | 12 ++++++------ 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/routers/repo/attachment.go b/routers/repo/attachment.go index 77b6fff85..30e57edfd 100755 --- a/routers/repo/attachment.go +++ b/routers/repo/attachment.go @@ -609,6 +609,10 @@ func GetObsKey(ctx *context.Context) { ctx.JSON(200, map[string]string{ "uuid": uuid, "key": key, + "access_key_id": setting.AccessKeyID, + "secret_access_key": setting.SecretAccessKey, + "server": setting.Endpoint, + "bucket": setting.Bucket, }) } diff --git a/web_src/js/components/ObsUploader.vue b/web_src/js/components/ObsUploader.vue index 9ab59f320..e61981c7e 100755 --- a/web_src/js/components/ObsUploader.vue +++ b/web_src/js/components/ObsUploader.vue @@ -142,11 +142,11 @@ export default { }, // 构建ObsClient - getObsClient(){ + getObsClient(result){ return new ObsClient({ - access_key_id: 'FDP3LRMHLB9S77VWEHE3', - secret_access_key: 'LyM82Wk80pgjhs2z7AdDcsdpCWhbsJtSzQ7hkESN', - server : 'https://obs.cn-south-222.ai.pcl.cn' + access_key_id: result.access_key_id, + secret_access_key: result.secret_access_key, + server : result.server }); }, @@ -157,13 +157,13 @@ export default { const upload_datasetId = document .getElementById('datasetId') .getAttribute('datasetId'); - const obsClient = this.getObsClient() + const obsClient = this.getObsClient(result) const _this = this var cp; var hook; obsClient.uploadFile({ - Bucket : 'testopendata', + Bucket : result.bucket, Key : result.key, SourceFile : file, PartSize : 64 * 1024 * 1024, From 7c0baefdf4a0e62a28e5408f64b39174a91ea72f Mon Sep 17 00:00:00 2001 From: yuyuanshifu <747342561@qq.com> Date: Thu, 18 Feb 2021 11:00:06 +0800 Subject: [PATCH 29/36] config --- custom/conf/app.ini.sample | 2 -- 1 file changed, 2 deletions(-) diff --git a/custom/conf/app.ini.sample b/custom/conf/app.ini.sample index 49daa6e8d..a027624f4 100755 --- a/custom/conf/app.ini.sample +++ b/custom/conf/app.ini.sample @@ -1071,7 +1071,6 @@ HOST = http://192.168.207.84:3002/ COMMIT_VALID_DATE = 2021-01-15 [obs] -#ENDPOINT = 112.95.163.82 ENDPOINT = https://obs.cn-south-222.ai.pcl.cn ACCESS_KEY_ID = FDP3LRMHLB9S77VWEHE3 SECRET_ACCESS_KEY = LyM82Wk80pgjhs2z7AdDcsdpCWhbsJtSzQ7hkESN @@ -1080,7 +1079,6 @@ LOCATION = cn-south-222 BASE_PATH = attachment/ [modelarts] -#ENDPOINT = 112.95.163.80 ENDPOINT = https://modelarts.cn-south-222.ai.pcl.cn PROJECT_ID = edfccf24aace4e17a56da6bcbb55a5aa PROJECT_NAME = cn-south-222_test From fa356c58daeab00994262108f06175e359de4e66 Mon Sep 17 00:00:00 2001 From: yuyuanshifu <747342561@qq.com> Date: Thu, 18 Feb 2021 15:16:04 +0800 Subject: [PATCH 30/36] notebook --- models/cloudbrain.go | 2 +- modules/modelarts/resty.go | 72 ++++++++++++++++++++++------------------------ routers/repo/modelarts.go | 5 ---- 3 files changed, 36 insertions(+), 43 deletions(-) diff --git a/models/cloudbrain.go b/models/cloudbrain.go index 036c25264..dea53e619 100755 --- a/models/cloudbrain.go +++ b/models/cloudbrain.go @@ -315,7 +315,7 @@ type Location struct { } type NotebookResult struct { - ErrorCode string `json:"error_code"` + ErrorCode int `json:"error_code"` ErrorMsg string `json:"error_msg"` } diff --git a/modules/modelarts/resty.go b/modules/modelarts/resty.go index 95ad94d31..8f83990e0 100755 --- a/modules/modelarts/resty.go +++ 
b/modules/modelarts/resty.go @@ -106,12 +106,6 @@ sendjob: return nil, fmt.Errorf("resty create job: %s", err) } - if res.StatusCode() == http.StatusUnauthorized && retry < 1 { - retry++ - _ = getToken() - goto sendjob - } - var response models.NotebookResult err = json.Unmarshal(res.Body(), &response) if err != nil { @@ -119,7 +113,13 @@ sendjob: return &result, fmt.Errorf("son.Unmarshal failed: %s", err.Error()) } - if len(response.ErrorCode) != 0 { + if response.ErrorCode == http.StatusUnauthorized && retry < 1 { + retry++ + _ = getToken() + goto sendjob + } + + if response.ErrorCode != 0 { log.Error("CreateJob failed(%s): %s", response.ErrorCode, response.ErrorMsg) return &result, fmt.Errorf("CreateJob failed(%s): %s", response.ErrorCode, response.ErrorMsg) } @@ -145,14 +145,6 @@ sendjob: return nil, fmt.Errorf("resty GetJob: %v", err) } - if res.StatusCode() == http.StatusUnauthorized && retry < 1 { - retry++ - _ = getToken() - goto sendjob - } - - log.Info(string(res.Body())) - var response models.NotebookResult err = json.Unmarshal(res.Body(), &response) if err != nil { @@ -160,7 +152,13 @@ sendjob: return &result, fmt.Errorf("son.Unmarshal failed: %s", err.Error()) } - if len(response.ErrorCode) != 0 { + if response.ErrorCode == http.StatusUnauthorized && retry < 1 { + retry++ + _ = getToken() + goto sendjob + } + + if response.ErrorCode != 0 { log.Error("GetJob failed(%s): %s", response.ErrorCode, response.ErrorMsg) return &result, fmt.Errorf("GetJob failed(%s): %s", response.ErrorCode, response.ErrorMsg) } @@ -187,12 +185,6 @@ sendjob: return &result, fmt.Errorf("resty StopJob: %v", err) } - if res.StatusCode() == http.StatusUnauthorized && retry < 1 { - retry++ - _ = getToken() - goto sendjob - } - var response models.NotebookResult err = json.Unmarshal(res.Body(), &response) if err != nil { @@ -200,7 +192,13 @@ sendjob: return &result, fmt.Errorf("son.Unmarshal failed: %s", err.Error()) } - if len(response.ErrorCode) != 0 { + if response.ErrorCode == http.StatusUnauthorized && retry < 1 { + retry++ + _ = getToken() + goto sendjob + } + + if response.ErrorCode != 0 { log.Error("StopJob failed(%s): %s", response.ErrorCode, response.ErrorMsg) return &result, fmt.Errorf("StopJob failed(%s): %s", response.ErrorCode, response.ErrorMsg) } @@ -226,12 +224,6 @@ sendjob: return &result, fmt.Errorf("resty DelJob: %v", err) } - if res.StatusCode() == http.StatusUnauthorized && retry < 1 { - retry++ - _ = getToken() - goto sendjob - } - var response models.NotebookResult err = json.Unmarshal(res.Body(), &response) if err != nil { @@ -239,7 +231,13 @@ sendjob: return &result, fmt.Errorf("son.Unmarshal failed: %s", err.Error()) } - if len(response.ErrorCode) != 0 { + if response.ErrorCode == http.StatusUnauthorized && retry < 1 { + retry++ + _ = getToken() + goto sendjob + } + + if response.ErrorCode != 0 { log.Error("DelJob failed(%s): %s", response.ErrorCode, response.ErrorMsg) return &result, fmt.Errorf("DelJob failed(%s): %s", response.ErrorCode, response.ErrorMsg) } @@ -265,12 +263,6 @@ sendjob: return &result, fmt.Errorf("resty GetJobToken: %v", err) } - if res.StatusCode() == http.StatusUnauthorized && retry < 1 { - retry++ - _ = getToken() - goto sendjob - } - var response models.NotebookResult err = json.Unmarshal(res.Body(), &response) if err != nil { @@ -278,7 +270,13 @@ sendjob: return &result, fmt.Errorf("son.Unmarshal failed: %s", err.Error()) } - if len(response.ErrorCode) != 0 { + if response.ErrorCode == http.StatusUnauthorized && retry < 1 { + retry++ + _ = getToken() + goto 
sendjob + } + + if response.ErrorCode != 0 { log.Error("GetJobToken failed(%s): %s", response.ErrorCode, response.ErrorMsg) return &result, fmt.Errorf("GetJobToken failed(%s): %s", response.ErrorCode, response.ErrorMsg) } diff --git a/routers/repo/modelarts.go b/routers/repo/modelarts.go index d23ef3c05..a893bf21b 100755 --- a/routers/repo/modelarts.go +++ b/routers/repo/modelarts.go @@ -165,11 +165,6 @@ func ModelArtsDebug(ctx *context.Context) { } urlPrefix := result.Spec.Annotations.TargetDomain + "/modelarts/internal/hub/notebook/user/" + task.JobID - - //https://console.ai.pcl.cn/modelarts/internal/hub/notebook/user/DE-afcdf674-6489-11eb-bfe7-0255ac100057/lab - //debugUrl := setting.DebugServerHost + "jpylab_" + task.JobID + "_" + task.SubTaskName - //debugUrl := setting.ModelArtsHost + "/modelarts/internal/hub/notebook/user/" + task.JobID + "/lab" - //debugUrl := "https://console.ai.pcl.cn/modelarts/internal/hub/notebook/user/" + task.JobID + "/lab" debugUrl := urlPrefix + "?token=" + res.Token ctx.Redirect(debugUrl) } From 13f3a4ac3c8c453f30d9786b5a801dccb298a6d5 Mon Sep 17 00:00:00 2001 From: yuyuanshifu <747342561@qq.com> Date: Fri, 19 Feb 2021 10:58:05 +0800 Subject: [PATCH 31/36] respone --- models/cloudbrain.go | 2 +- modules/modelarts/resty.go | 70 +++++++++++++++++++++++----------------------- 2 files changed, 36 insertions(+), 36 deletions(-) diff --git a/models/cloudbrain.go b/models/cloudbrain.go index dea53e619..036c25264 100755 --- a/models/cloudbrain.go +++ b/models/cloudbrain.go @@ -315,7 +315,7 @@ type Location struct { } type NotebookResult struct { - ErrorCode int `json:"error_code"` + ErrorCode string `json:"error_code"` ErrorMsg string `json:"error_msg"` } diff --git a/modules/modelarts/resty.go b/modules/modelarts/resty.go index 8f83990e0..b8a8b36a0 100755 --- a/modules/modelarts/resty.go +++ b/modules/modelarts/resty.go @@ -106,6 +106,12 @@ sendjob: return nil, fmt.Errorf("resty create job: %s", err) } + if res.StatusCode() == http.StatusUnauthorized && retry < 1 { + retry++ + _ = getToken() + goto sendjob + } + var response models.NotebookResult err = json.Unmarshal(res.Body(), &response) if err != nil { @@ -113,13 +119,7 @@ sendjob: return &result, fmt.Errorf("son.Unmarshal failed: %s", err.Error()) } - if response.ErrorCode == http.StatusUnauthorized && retry < 1 { - retry++ - _ = getToken() - goto sendjob - } - - if response.ErrorCode != 0 { + if len(response.ErrorCode) != 0 { log.Error("CreateJob failed(%s): %s", response.ErrorCode, response.ErrorMsg) return &result, fmt.Errorf("CreateJob failed(%s): %s", response.ErrorCode, response.ErrorMsg) } @@ -145,6 +145,12 @@ sendjob: return nil, fmt.Errorf("resty GetJob: %v", err) } + if res.StatusCode() == http.StatusUnauthorized && retry < 1 { + retry++ + _ = getToken() + goto sendjob + } + var response models.NotebookResult err = json.Unmarshal(res.Body(), &response) if err != nil { @@ -152,13 +158,7 @@ sendjob: return &result, fmt.Errorf("son.Unmarshal failed: %s", err.Error()) } - if response.ErrorCode == http.StatusUnauthorized && retry < 1 { - retry++ - _ = getToken() - goto sendjob - } - - if response.ErrorCode != 0 { + if len(response.ErrorCode) != 0 { log.Error("GetJob failed(%s): %s", response.ErrorCode, response.ErrorMsg) return &result, fmt.Errorf("GetJob failed(%s): %s", response.ErrorCode, response.ErrorMsg) } @@ -185,6 +185,12 @@ sendjob: return &result, fmt.Errorf("resty StopJob: %v", err) } + if res.StatusCode() == http.StatusUnauthorized && retry < 1 { + retry++ + _ = getToken() + goto sendjob + } 
+ var response models.NotebookResult err = json.Unmarshal(res.Body(), &response) if err != nil { @@ -192,13 +198,7 @@ sendjob: return &result, fmt.Errorf("son.Unmarshal failed: %s", err.Error()) } - if response.ErrorCode == http.StatusUnauthorized && retry < 1 { - retry++ - _ = getToken() - goto sendjob - } - - if response.ErrorCode != 0 { + if len(response.ErrorCode) != 0 { log.Error("StopJob failed(%s): %s", response.ErrorCode, response.ErrorMsg) return &result, fmt.Errorf("StopJob failed(%s): %s", response.ErrorCode, response.ErrorMsg) } @@ -224,6 +224,12 @@ sendjob: return &result, fmt.Errorf("resty DelJob: %v", err) } + if res.StatusCode() == http.StatusUnauthorized && retry < 1 { + retry++ + _ = getToken() + goto sendjob + } + var response models.NotebookResult err = json.Unmarshal(res.Body(), &response) if err != nil { @@ -231,13 +237,7 @@ sendjob: return &result, fmt.Errorf("son.Unmarshal failed: %s", err.Error()) } - if response.ErrorCode == http.StatusUnauthorized && retry < 1 { - retry++ - _ = getToken() - goto sendjob - } - - if response.ErrorCode != 0 { + if len(response.ErrorCode) != 0 { log.Error("DelJob failed(%s): %s", response.ErrorCode, response.ErrorMsg) return &result, fmt.Errorf("DelJob failed(%s): %s", response.ErrorCode, response.ErrorMsg) } @@ -263,6 +263,12 @@ sendjob: return &result, fmt.Errorf("resty GetJobToken: %v", err) } + if res.StatusCode() == http.StatusUnauthorized && retry < 1 { + retry++ + _ = getToken() + goto sendjob + } + var response models.NotebookResult err = json.Unmarshal(res.Body(), &response) if err != nil { @@ -270,13 +276,7 @@ sendjob: return &result, fmt.Errorf("son.Unmarshal failed: %s", err.Error()) } - if response.ErrorCode == http.StatusUnauthorized && retry < 1 { - retry++ - _ = getToken() - goto sendjob - } - - if response.ErrorCode != 0 { + if len(response.ErrorCode) != 0 { log.Error("GetJobToken failed(%s): %s", response.ErrorCode, response.ErrorMsg) return &result, fmt.Errorf("GetJobToken failed(%s): %s", response.ErrorCode, response.ErrorMsg) } From 799587ce05d7427bf870d26814f61f1287dfdfd6 Mon Sep 17 00:00:00 2001 From: yuyuanshifu <747342561@qq.com> Date: Fri, 19 Feb 2021 17:53:27 +0800 Subject: [PATCH 32/36] download obs --- modules/storage/obs.go | 19 +++++++++++++++++++ routers/repo/attachment.go | 27 ++++++++++++++++++++++----- routers/repo/dataset.go | 1 + templates/repo/datasets/dataset_list.tmpl | 4 ++-- templates/repo/datasets/index.tmpl | 1 - 5 files changed, 44 insertions(+), 8 deletions(-) diff --git a/modules/storage/obs.go b/modules/storage/obs.go index 86f98590d..bf8fac54c 100755 --- a/modules/storage/obs.go +++ b/modules/storage/obs.go @@ -154,3 +154,22 @@ func ObsGenMultiPartSignedUrl(uuid string, uploadId string, partNumber int, part return req.URL.String(), nil } + +func ObsGetPreSignedUrl(uuid, fileName string) (string, error) { + input := &obs.CreateSignedUrlInput{} + input.Method = obs.HttpMethodGet + input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/") + input.Bucket = setting.Bucket + input.Expires = 60 * 60 + + reqParams := make(map[string]string) + reqParams["response-content-disposition"] = "attachment; filename=\"" + fileName + "\"" + input.QueryParams = reqParams + output, err := ObsCli.CreateSignedUrl(input) + if err != nil { + log.Error("CreateSignedUrl failed:", err.Error()) + return "", err + } + + return output.SignedUrl, nil +} diff --git a/routers/repo/attachment.go b/routers/repo/attachment.go index 30e57edfd..ecfc8c407 100755 --- 
a/routers/repo/attachment.go +++ b/routers/repo/attachment.go @@ -143,6 +143,13 @@ func DeleteAttachment(ctx *context.Context) { // GetAttachment serve attachements func GetAttachment(ctx *context.Context) { + typeCloudBrain := ctx.QueryInt("type") + err := checkTypeCloudBrain(typeCloudBrain) + if err != nil { + ctx.ServerError("checkTypeCloudBrain failed", err) + return + } + attach, err := models.GetAttachmentByUUID(ctx.Params(":uuid")) if err != nil { if models.IsErrAttachmentNotExist(err) { @@ -196,19 +203,29 @@ func GetAttachment(ctx *context.Context) { //If we have matched and access to release or issue if setting.Attachment.StoreType == storage.MinioStorageType { - url, err := storage.Attachments.PresignedGetURL(attach.RelativePath(), attach.Name) - if err != nil { - ctx.ServerError("PresignedGetURL", err) - return + url := "" + if typeCloudBrain == models.TypeCloudBrainOne { + url, err = storage.Attachments.PresignedGetURL(attach.RelativePath(), attach.Name) + if err != nil { + ctx.ServerError("PresignedGetURL", err) + return + } + } else { + url, err = storage.ObsGetPreSignedUrl(attach.UUID, attach.Name) + if err != nil { + ctx.ServerError("ObsGetPreSignedUrl", err) + return + } } + log.Info(url) + if err = increaseDownloadCount(attach, dataSet); err != nil { ctx.ServerError("Update", err) return } http.Redirect(ctx.Resp, ctx.Req.Request, url, http.StatusMovedPermanently) - } else { fr, err := storage.Attachments.Open(attach.RelativePath()) if err != nil { diff --git a/routers/repo/dataset.go b/routers/repo/dataset.go index 4d4fc4113..9c8557afa 100755 --- a/routers/repo/dataset.go +++ b/routers/repo/dataset.go @@ -80,6 +80,7 @@ func DatasetIndex(ctx *context.Context) { ctx.Data["Attachments"] = attachments ctx.Data["IsOwner"] = true ctx.Data["StoreType"] = setting.Attachment.StoreType + ctx.Data["Type"] = ctx.QueryInt("type") renderAttachmentSettings(ctx) diff --git a/templates/repo/datasets/dataset_list.tmpl b/templates/repo/datasets/dataset_list.tmpl index 53a8c8273..a86b7c6ca 100755 --- a/templates/repo/datasets/dataset_list.tmpl +++ b/templates/repo/datasets/dataset_list.tmpl @@ -3,7 +3,7 @@
@@ -14,7 +14,7 @@
 			{{svg "octicon-flame" 16}} {{(.DownloadCount | PrettyNumber)}}
-
+
 			{{svg "octicon-file" 16}}
diff --git a/templates/repo/datasets/index.tmpl b/templates/repo/datasets/index.tmpl index dc3b82e4e..413bce917 100755 --- a/templates/repo/datasets/index.tmpl +++ b/templates/repo/datasets/index.tmpl @@ -5,7 +5,6 @@ $(document).ready(function() { url = window.location.href type = url.split('?type=')[1] - console.log(type) if (type == 0){ $('.contorl_component').attr("id", 'minioUploader') }else{ From a8318daafee7111b13287f396079da34641f274e Mon Sep 17 00:00:00 2001 From: yuyuanshifu <747342561@qq.com> Date: Sat, 20 Feb 2021 15:41:31 +0800 Subject: [PATCH 33/36] debug modelarts --- routers/repo/modelarts.go | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/routers/repo/modelarts.go b/routers/repo/modelarts.go index a893bf21b..f7543ece2 100755 --- a/routers/repo/modelarts.go +++ b/routers/repo/modelarts.go @@ -5,6 +5,7 @@ import ( "errors" "github.com/unknwon/com" "strconv" + "strings" "time" "code.gitea.io/gitea/models" @@ -146,7 +147,7 @@ func ModelArtsShow(ctx *context.Context) { func ModelArtsDebug(ctx *context.Context) { var jobID = ctx.Params(":jobid") - task, err := models.GetCloudbrainByJobID(jobID) + _, err := models.GetCloudbrainByJobID(jobID) if err != nil { ctx.ServerError("GetCloudbrainByJobID failed", err) return @@ -164,7 +165,17 @@ func ModelArtsDebug(ctx *context.Context) { return } - urlPrefix := result.Spec.Annotations.TargetDomain + "/modelarts/internal/hub/notebook/user/" + task.JobID + + urls := strings.Split(result.Spec.Annotations.Url, "/") + urlPrefix := result.Spec.Annotations.TargetDomain + for i, url := range urls { + if i > 2 { + urlPrefix += "/" + url + } + } + + //urlPrefix := result.Spec.Annotations.TargetDomain + "/modelarts/internal/hub/notebook/user/" + task.JobID + log.Info(urlPrefix) debugUrl := urlPrefix + "?token=" + res.Token ctx.Redirect(debugUrl) } From eaeb6699c91c24fc8eab1ec169f91d2b8f6e73b4 Mon Sep 17 00:00:00 2001 From: yuyuanshifu <747342561@qq.com> Date: Sat, 20 Feb 2021 16:14:25 +0800 Subject: [PATCH 34/36] store path mod --- modules/modelarts/modelarts.go | 2 +- modules/storage/obs.go | 12 ++++++------ routers/repo/attachment.go | 4 ++-- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/modules/modelarts/modelarts.go b/modules/modelarts/modelarts.go index 775b345ad..844f37ffb 100755 --- a/modules/modelarts/modelarts.go +++ b/modules/modelarts/modelarts.go @@ -22,7 +22,7 @@ const ( ) func GenerateTask(ctx *context.Context, jobName, uuid, description string) error { - dataActualPath := setting.Bucket + "/" + setting.BasePath + path.Join(uuid[0:1], uuid[1:2]) + "/" + dataActualPath := setting.Bucket + "/" + setting.BasePath + path.Join(uuid[0:1], uuid[1:2]) + "/" + uuid + "/" jobResult, err := CreateJob(models.CreateNotebookParams{ JobName: jobName, Description:description, diff --git a/modules/storage/obs.go b/modules/storage/obs.go index bf8fac54c..d174b8fba 100755 --- a/modules/storage/obs.go +++ b/modules/storage/obs.go @@ -37,7 +37,7 @@ func ObsHasObject(path string) (bool, error) { } func GetObsPartInfos(uuid string, uploadID string) (string, error) { - key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/") + key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/") output, err := ObsCli.ListParts(&obs.ListPartsInput{ Bucket: setting.Bucket, @@ -60,7 +60,7 @@ func GetObsPartInfos(uuid string, uploadID string) (string, error) { func NewObsMultiPartUpload(uuid string) (string, error) { input := 
&obs.InitiateMultipartUploadInput{} input.Bucket = setting.Bucket - input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/") + input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/") output, err := ObsCli.InitiateMultipartUpload(input) if err != nil { @@ -74,7 +74,7 @@ func NewObsMultiPartUpload(uuid string) (string, error) { func CompleteObsMultiPartUpload(uuid string, uploadID string) error { input := &obs.CompleteMultipartUploadInput{} input.Bucket = setting.Bucket - input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/") + input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/") input.UploadId = uploadID output, err := ObsCli.ListParts(&obs.ListPartsInput{ Bucket: setting.Bucket, @@ -105,7 +105,7 @@ func CompleteObsMultiPartUpload(uuid string, uploadID string) error { func ObsUploadPart(uuid string, uploadId string, partNumber int, partSize int64, body io.Reader) (string, error) { input := &obs.UploadPartInput{} input.PartNumber = partNumber - input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/") + input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/") input.UploadId = uploadId input.Bucket = setting.Bucket input.PartSize = partSize @@ -141,7 +141,7 @@ func ObsGenMultiPartSignedUrl(uuid string, uploadId string, partNumber int, part */ - Key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/") + Key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/") req, err := ObsCli.CreateUploadPartSignedUrl(setting.Bucket, Key, uploadId, partNumber, partSize) if err != nil { log.Error("CreateSignedUrl failed:", err.Error()) @@ -158,7 +158,7 @@ func ObsGenMultiPartSignedUrl(uuid string, uploadId string, partNumber int, part func ObsGetPreSignedUrl(uuid, fileName string) (string, error) { input := &obs.CreateSignedUrlInput{} input.Method = obs.HttpMethodGet - input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/") + input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/") input.Bucket = setting.Bucket input.Expires = 60 * 60 diff --git a/routers/repo/attachment.go b/routers/repo/attachment.go index ecfc8c407..6874a1eda 100755 --- a/routers/repo/attachment.go +++ b/routers/repo/attachment.go @@ -309,7 +309,7 @@ func AddAttachment(ctx *context.Context) { return } } else { - has, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(uuid)) + has, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(uuid) + "/" + uuid) if err != nil { ctx.ServerError("ObsHasObject", err) return @@ -621,7 +621,7 @@ func GetMultipartUploadUrl(ctx *context.Context) { func GetObsKey(ctx *context.Context) { uuid := gouuid.NewV4().String() - key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/") + key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/") ctx.JSON(200, map[string]string{ "uuid": uuid, From 3e70eee7939ad2c9a894ebf8dc46f1ef23c1919f Mon Sep 17 00:00:00 2001 From: yuyuanshifu <747342561@qq.com> Date: Sat, 20 Feb 2021 17:50:46 +0800 Subject: [PATCH 35/36] error_msg --- 
modules/modelarts/resty.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/modules/modelarts/resty.go b/modules/modelarts/resty.go index b8a8b36a0..32fd6e32f 100755 --- a/modules/modelarts/resty.go +++ b/modules/modelarts/resty.go @@ -121,6 +121,9 @@ sendjob: if len(response.ErrorCode) != 0 { log.Error("CreateJob failed(%s): %s", response.ErrorCode, response.ErrorMsg) + if response.ErrorCode == "ModelArts.0118" { + response.ErrorMsg = "所选规格使用数量已超过最大配额限制。" + } return &result, fmt.Errorf("CreateJob failed(%s): %s", response.ErrorCode, response.ErrorMsg) } From 3f4864a0bf3ed50f565b5c5969b4c5b65c3fc3d0 Mon Sep 17 00:00:00 2001 From: yuyuanshifu <747342561@qq.com> Date: Tue, 23 Feb 2021 09:35:32 +0800 Subject: [PATCH 36/36] error_code --- modules/modelarts/resty.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/modelarts/resty.go b/modules/modelarts/resty.go index 32fd6e32f..df020decb 100755 --- a/modules/modelarts/resty.go +++ b/modules/modelarts/resty.go @@ -23,7 +23,7 @@ const ( urlGetToken = "/v3/auth/tokens" urlNotebook = "/demanager/instances" - urlQueryNotebook = "/demanager/instances" + errorCodeExceedLimit = "ModelArts.0118" ) func getRestyClient() *resty.Client { if restyClient == nil { @@ -121,7 +121,7 @@ sendjob: if len(response.ErrorCode) != 0 { log.Error("CreateJob failed(%s): %s", response.ErrorCode, response.ErrorMsg) - if response.ErrorCode == "ModelArts.0118" { + if response.ErrorCode == errorCodeExceedLimit { response.ErrorMsg = "所选规格使用数量已超过最大配额限制。" } return &result, fmt.Errorf("CreateJob failed(%s): %s", response.ErrorCode, response.ErrorMsg)
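Patches 30 and 31 move the expired-token retry back and forth between the HTTP status check and the parsed error_code, and patch 36 finishes by naming the quota error code as a constant. The shape they converge on is: send the request, refresh the token once and resend on HTTP 401, then inspect the ModelArts error_code/error_msg body. Below is a self-contained sketch of that control flow under stated assumptions: it uses go-resty v2, and getToken, the X-Auth-Token header, the URL handling, and the translated quota message are placeholders standing in for the module's real wiring, not its actual API.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"

	"github.com/go-resty/resty/v2"
)

// notebookResult mirrors models.NotebookResult after patch 31: ModelArts
// returns error_code as a string such as "ModelArts.0118".
type notebookResult struct {
	ErrorCode string `json:"error_code"`
	ErrorMsg  string `json:"error_msg"`
}

const errorCodeExceedLimit = "ModelArts.0118"

var (
	client = resty.New()
	token  string
)

// getToken stands in for the module's IAM token refresh (placeholder only).
func getToken() error {
	token = "renewed-token"
	return nil
}

func getJob(url string) (*notebookResult, error) {
	retry := 0

sendjob:
	res, err := client.R().
		SetHeader("X-Auth-Token", token).
		Get(url)
	if err != nil {
		return nil, fmt.Errorf("resty GetJob: %v", err)
	}

	// An expired token shows up as HTTP 401; refresh once and resend.
	if res.StatusCode() == http.StatusUnauthorized && retry < 1 {
		retry++
		_ = getToken()
		goto sendjob
	}

	var response notebookResult
	if err = json.Unmarshal(res.Body(), &response); err != nil {
		return nil, fmt.Errorf("json.Unmarshal failed: %s", err.Error())
	}

	// A non-empty error_code is a service-level failure even on HTTP 200;
	// the quota code gets a friendlier message, as in patches 35/36.
	if len(response.ErrorCode) != 0 {
		if response.ErrorCode == errorCodeExceedLimit {
			response.ErrorMsg = "the selected flavor exceeds the maximum quota"
		}
		return &response, fmt.Errorf("GetJob failed(%s): %s", response.ErrorCode, response.ErrorMsg)
	}
	return &response, nil
}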