diff --git a/modules/obs/auth.go b/modules/obs/auth.go
new file mode 100755
index 000000000..607a5ec39
--- /dev/null
+++ b/modules/obs/auth.go
@@ -0,0 +1,466 @@
+// Copyright 2019 Huawei Technologies Co.,Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+package obs
+
+import (
+ "fmt"
+ "net/url"
+ "sort"
+ "strings"
+ "time"
+)
+
+func (obsClient ObsClient) doAuthTemporary(method, bucketName, objectKey string, params map[string]string,
+ headers map[string][]string, expires int64) (requestURL string, err error) {
+ isAkSkEmpty := obsClient.conf.securityProvider == nil || obsClient.conf.securityProvider.ak == "" || obsClient.conf.securityProvider.sk == ""
+	if !isAkSkEmpty && obsClient.conf.securityProvider.securityToken != "" {
+ if obsClient.conf.signature == SignatureObs {
+ params[HEADER_STS_TOKEN_OBS] = obsClient.conf.securityProvider.securityToken
+ } else {
+ params[HEADER_STS_TOKEN_AMZ] = obsClient.conf.securityProvider.securityToken
+ }
+ }
+ requestURL, canonicalizedURL := obsClient.conf.formatUrls(bucketName, objectKey, params, true)
+ parsedRequestURL, err := url.Parse(requestURL)
+ if err != nil {
+ return "", err
+ }
+ encodeHeaders(headers)
+ hostName := parsedRequestURL.Host
+
+ isV4 := obsClient.conf.signature == SignatureV4
+ prepareHostAndDate(headers, hostName, isV4)
+
+ if isAkSkEmpty {
+		doLog(LEVEL_WARN, "No AK/SK provided, skipping authorization construction")
+ } else {
+ if isV4 {
+ date, parseDateErr := time.Parse(RFC1123_FORMAT, headers[HEADER_DATE_CAMEL][0])
+ if parseDateErr != nil {
+ doLog(LEVEL_WARN, "Failed to parse date with reason: %v", parseDateErr)
+ return "", parseDateErr
+ }
+ delete(headers, HEADER_DATE_CAMEL)
+ shortDate := date.Format(SHORT_DATE_FORMAT)
+ longDate := date.Format(LONG_DATE_FORMAT)
+ if len(headers[HEADER_HOST_CAMEL]) != 0 {
+ index := strings.LastIndex(headers[HEADER_HOST_CAMEL][0], ":")
+ if index != -1 {
+ port := headers[HEADER_HOST_CAMEL][0][index+1:]
+ if port == "80" || port == "443" {
+ headers[HEADER_HOST_CAMEL] = []string{headers[HEADER_HOST_CAMEL][0][:index]}
+ }
+ }
+ }
+
+ signedHeaders, _headers := getSignedHeaders(headers)
+
+ credential, scope := getCredential(obsClient.conf.securityProvider.ak, obsClient.conf.region, shortDate)
+ params[PARAM_ALGORITHM_AMZ_CAMEL] = V4_HASH_PREFIX
+ params[PARAM_CREDENTIAL_AMZ_CAMEL] = credential
+ params[PARAM_DATE_AMZ_CAMEL] = longDate
+ params[PARAM_EXPIRES_AMZ_CAMEL] = Int64ToString(expires)
+ params[PARAM_SIGNEDHEADERS_AMZ_CAMEL] = strings.Join(signedHeaders, ";")
+
+ requestURL, canonicalizedURL = obsClient.conf.formatUrls(bucketName, objectKey, params, true)
+ parsedRequestURL, _err := url.Parse(requestURL)
+ if _err != nil {
+ return "", _err
+ }
+
+ stringToSign := getV4StringToSign(method, canonicalizedURL, parsedRequestURL.RawQuery, scope, longDate, UNSIGNED_PAYLOAD, signedHeaders, _headers)
+ signature := getSignature(stringToSign, obsClient.conf.securityProvider.sk, obsClient.conf.region, shortDate)
+
+ requestURL += fmt.Sprintf("&%s=%s", PARAM_SIGNATURE_AMZ_CAMEL, UrlEncode(signature, false))
+ } else {
+ originDate := headers[HEADER_DATE_CAMEL][0]
+ date, parseDateErr := time.Parse(RFC1123_FORMAT, originDate)
+ if parseDateErr != nil {
+ doLog(LEVEL_WARN, "Failed to parse date with reason: %v", parseDateErr)
+ return "", parseDateErr
+ }
+ expires += date.Unix()
+ headers[HEADER_DATE_CAMEL] = []string{Int64ToString(expires)}
+
+ stringToSign := getV2StringToSign(method, canonicalizedURL, headers, obsClient.conf.signature == SignatureObs)
+ signature := UrlEncode(Base64Encode(HmacSha1([]byte(obsClient.conf.securityProvider.sk), []byte(stringToSign))), false)
+			if !strings.Contains(requestURL, "?") {
+ requestURL += "?"
+ } else {
+ requestURL += "&"
+ }
+ delete(headers, HEADER_DATE_CAMEL)
+
+ if obsClient.conf.signature != SignatureObs {
+ requestURL += "AWS"
+ }
+ requestURL += fmt.Sprintf("AccessKeyId=%s&Expires=%d&Signature=%s", UrlEncode(obsClient.conf.securityProvider.ak, false), expires, signature)
+ }
+ }
+
+ return
+}
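+
+// Illustrative note (not part of the SDK): for the non-V4 branch above, the
+// returned temporary URL ends with a query of the form
+//
+//	?AWSAccessKeyId=<ak>&Expires=<unixSeconds>&Signature=<urlEncodedSignature>
+//
+// ("AccessKeyId" without the "AWS" prefix when the OBS signature protocol is
+// selected), where Expires is the parsed Date header plus the caller-supplied
+// expiry in seconds.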
+
+func (obsClient ObsClient) doAuth(method, bucketName, objectKey string, params map[string]string,
+ headers map[string][]string, hostName string) (requestURL string, err error) {
+ isAkSkEmpty := obsClient.conf.securityProvider == nil || obsClient.conf.securityProvider.ak == "" || obsClient.conf.securityProvider.sk == ""
+	if !isAkSkEmpty && obsClient.conf.securityProvider.securityToken != "" {
+ if obsClient.conf.signature == SignatureObs {
+ headers[HEADER_STS_TOKEN_OBS] = []string{obsClient.conf.securityProvider.securityToken}
+ } else {
+ headers[HEADER_STS_TOKEN_AMZ] = []string{obsClient.conf.securityProvider.securityToken}
+ }
+ }
+ isObs := obsClient.conf.signature == SignatureObs
+ requestURL, canonicalizedURL := obsClient.conf.formatUrls(bucketName, objectKey, params, true)
+ parsedRequestURL, err := url.Parse(requestURL)
+ if err != nil {
+ return "", err
+ }
+ encodeHeaders(headers)
+
+ if hostName == "" {
+ hostName = parsedRequestURL.Host
+ }
+
+ isV4 := obsClient.conf.signature == SignatureV4
+ prepareHostAndDate(headers, hostName, isV4)
+
+ if isAkSkEmpty {
+		doLog(LEVEL_WARN, "No AK/SK provided, skipping authorization construction")
+ } else {
+ ak := obsClient.conf.securityProvider.ak
+ sk := obsClient.conf.securityProvider.sk
+ var authorization string
+ if isV4 {
+ headers[HEADER_CONTENT_SHA256_AMZ] = []string{UNSIGNED_PAYLOAD}
+ ret := v4Auth(ak, sk, obsClient.conf.region, method, canonicalizedURL, parsedRequestURL.RawQuery, headers)
+ authorization = fmt.Sprintf("%s Credential=%s,SignedHeaders=%s,Signature=%s", V4_HASH_PREFIX, ret["Credential"], ret["SignedHeaders"], ret["Signature"])
+ } else {
+ ret := v2Auth(ak, sk, method, canonicalizedURL, headers, isObs)
+ hashPrefix := V2_HASH_PREFIX
+ if isObs {
+ hashPrefix = OBS_HASH_PREFIX
+ }
+ authorization = fmt.Sprintf("%s %s:%s", hashPrefix, ak, ret["Signature"])
+ }
+ headers[HEADER_AUTH_CAMEL] = []string{authorization}
+ }
+ return
+}
+
+func prepareHostAndDate(headers map[string][]string, hostName string, isV4 bool) {
+ headers[HEADER_HOST_CAMEL] = []string{hostName}
+ if date, ok := headers[HEADER_DATE_AMZ]; ok {
+ flag := false
+ if len(date) == 1 {
+ if isV4 {
+ if t, err := time.Parse(LONG_DATE_FORMAT, date[0]); err == nil {
+ headers[HEADER_DATE_CAMEL] = []string{FormatUtcToRfc1123(t)}
+ flag = true
+ }
+ } else {
+ if strings.HasSuffix(date[0], "GMT") {
+ headers[HEADER_DATE_CAMEL] = []string{date[0]}
+ flag = true
+ }
+ }
+ }
+ if !flag {
+ delete(headers, HEADER_DATE_AMZ)
+ }
+ }
+ if _, ok := headers[HEADER_DATE_CAMEL]; !ok {
+ headers[HEADER_DATE_CAMEL] = []string{FormatUtcToRfc1123(time.Now().UTC())}
+ }
+}
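+
+// Example (illustrative, assuming LONG_DATE_FORMAT is "20060102T150405Z"):
+// for V4 signing, an incoming "X-Amz-Date: 20190329T123456Z" header is
+// normalized by prepareHostAndDate into "Date: Fri, 29 Mar 2019 12:34:56 GMT";
+// a value that neither parses (V4) nor ends in "GMT" (V2/OBS) is dropped.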
+
+func encodeHeaders(headers map[string][]string) {
+ for key, values := range headers {
+ for index, value := range values {
+ values[index] = UrlEncode(value, true)
+ }
+ headers[key] = values
+ }
+}
+
+func attachHeaders(headers map[string][]string, isObs bool) string {
+ length := len(headers)
+ _headers := make(map[string][]string, length)
+ keys := make([]string, 0, length)
+
+ for key, value := range headers {
+ _key := strings.ToLower(strings.TrimSpace(key))
+ if _key != "" {
+			prefixHeader := HEADER_PREFIX
+			if isObs {
+				prefixHeader = HEADER_PREFIX_OBS
+			}
+			if _key == "content-md5" || _key == "content-type" || _key == "date" || strings.HasPrefix(_key, prefixHeader) {
+ keys = append(keys, _key)
+ _headers[_key] = value
+ }
+ } else {
+ delete(headers, key)
+ }
+ }
+
+ for _, interestedHeader := range interestedHeaders {
+ if _, ok := _headers[interestedHeader]; !ok {
+ _headers[interestedHeader] = []string{""}
+ keys = append(keys, interestedHeader)
+ }
+ }
+	dateCamelHeader := PARAM_DATE_AMZ_CAMEL
+	dateHeader := HEADER_DATE_AMZ
+	if isObs {
+		dateCamelHeader = PARAM_DATE_OBS_CAMEL
+		dateHeader = HEADER_DATE_OBS
+	}
+	if _, ok := _headers[HEADER_DATE_CAMEL]; ok {
+		if _, ok := _headers[dateHeader]; ok {
+			_headers[HEADER_DATE_CAMEL] = []string{""}
+		} else if _, ok := headers[dateCamelHeader]; ok {
+			_headers[HEADER_DATE_CAMEL] = []string{""}
+		}
+	} else if _, ok := _headers[strings.ToLower(HEADER_DATE_CAMEL)]; ok {
+		if _, ok := _headers[dateHeader]; ok {
+			_headers[HEADER_DATE_CAMEL] = []string{""}
+		} else if _, ok := headers[dateCamelHeader]; ok {
+			_headers[HEADER_DATE_CAMEL] = []string{""}
+		}
+	}
+
+ sort.Strings(keys)
+
+ stringToSign := make([]string, 0, len(keys))
+ for _, key := range keys {
+ var value string
+ prefixHeader := HEADER_PREFIX
+ prefixMetaHeader := HEADER_PREFIX_META
+ if isObs {
+ prefixHeader = HEADER_PREFIX_OBS
+ prefixMetaHeader = HEADER_PREFIX_META_OBS
+ }
+ if strings.HasPrefix(key, prefixHeader) {
+ if strings.HasPrefix(key, prefixMetaHeader) {
+ for index, v := range _headers[key] {
+ value += strings.TrimSpace(v)
+ if index != len(_headers[key])-1 {
+ value += ","
+ }
+ }
+ } else {
+ value = strings.Join(_headers[key], ",")
+ }
+ value = fmt.Sprintf("%s:%s", key, value)
+ } else {
+ value = strings.Join(_headers[key], ",")
+ }
+ stringToSign = append(stringToSign, value)
+ }
+ return strings.Join(stringToSign, "\n")
+}
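+
+// Sketch of the canonical header block built above, assuming HEADER_PREFIX is
+// "x-amz-" and interestedHeaders is {content-md5, content-type, date}: for
+// input headers {Content-Type: "text/plain", x-amz-acl: "private"},
+// attachHeaders returns the "\n"-joined lines
+//
+//	""                  (content-md5, defaulted)
+//	"text/plain"        (content-type)
+//	""                  (date, defaulted)
+//	"x-amz-acl:private" (prefixed headers keep their key)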
+
+func getV2StringToSign(method, canonicalizedURL string, headers map[string][]string, isObs bool) string {
+ stringToSign := strings.Join([]string{method, "\n", attachHeaders(headers, isObs), "\n", canonicalizedURL}, "")
+
+ var isSecurityToken bool
+ var securityToken []string
+ if isObs {
+ securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_OBS]
+ } else {
+ securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_AMZ]
+ }
+ var query []string
+ if !isSecurityToken {
+		params := strings.Split(canonicalizedURL, "?")
+		if len(params) > 1 {
+			query = strings.Split(params[1], "&")
+ for _, value := range query {
+ if strings.HasPrefix(value, HEADER_STS_TOKEN_AMZ+"=") || strings.HasPrefix(value, HEADER_STS_TOKEN_OBS+"=") {
+ if value[len(HEADER_STS_TOKEN_AMZ)+1:] != "" {
+ securityToken = []string{value[len(HEADER_STS_TOKEN_AMZ)+1:]}
+ isSecurityToken = true
+ }
+ }
+ }
+ }
+ }
+ logStringToSign := stringToSign
+ if isSecurityToken && len(securityToken) > 0 {
+		logStringToSign = strings.ReplaceAll(logStringToSign, securityToken[0], "******")
+ }
+ doLog(LEVEL_DEBUG, "The v2 auth stringToSign:\n%s", logStringToSign)
+ return stringToSign
+}
+
+func v2Auth(ak, sk, method, canonicalizedURL string, headers map[string][]string, isObs bool) map[string]string {
+ stringToSign := getV2StringToSign(method, canonicalizedURL, headers, isObs)
+ return map[string]string{"Signature": Base64Encode(HmacSha1([]byte(sk), []byte(stringToSign)))}
+}
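+
+// Put together, the V2 Authorization header assembled by doAuth is, as a
+// sketch (assuming V2_HASH_PREFIX is "AWS" and OBS_HASH_PREFIX is "OBS"):
+//
+//	Authorization: AWS <ak>:Base64(HMAC-SHA1(sk, stringToSign))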
+
+func getScope(region, shortDate string) string {
+ return fmt.Sprintf("%s/%s/%s/%s", shortDate, region, V4_SERVICE_NAME, V4_SERVICE_SUFFIX)
+}
+
+func getCredential(ak, region, shortDate string) (string, string) {
+ scope := getScope(region, shortDate)
+ return fmt.Sprintf("%s/%s", ak, scope), scope
+}
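+
+// Example (illustrative, assuming V4_SERVICE_NAME is "s3" and
+// V4_SERVICE_SUFFIX is "aws4_request"):
+//
+//	getCredential("AKEXAMPLE", "cn-north-1", "20190329")
+//	// credential: "AKEXAMPLE/20190329/cn-north-1/s3/aws4_request"
+//	// scope:      "20190329/cn-north-1/s3/aws4_request"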
+
+func getV4StringToSign(method, canonicalizedURL, queryURL, scope, longDate, payload string, signedHeaders []string, headers map[string][]string) string {
+ canonicalRequest := make([]string, 0, 10+len(signedHeaders)*4)
+ canonicalRequest = append(canonicalRequest, method)
+ canonicalRequest = append(canonicalRequest, "\n")
+ canonicalRequest = append(canonicalRequest, canonicalizedURL)
+ canonicalRequest = append(canonicalRequest, "\n")
+ canonicalRequest = append(canonicalRequest, queryURL)
+ canonicalRequest = append(canonicalRequest, "\n")
+
+ for _, signedHeader := range signedHeaders {
+		values := headers[signedHeader]
+ for _, value := range values {
+ canonicalRequest = append(canonicalRequest, signedHeader)
+ canonicalRequest = append(canonicalRequest, ":")
+ canonicalRequest = append(canonicalRequest, value)
+ canonicalRequest = append(canonicalRequest, "\n")
+ }
+ }
+ canonicalRequest = append(canonicalRequest, "\n")
+ canonicalRequest = append(canonicalRequest, strings.Join(signedHeaders, ";"))
+ canonicalRequest = append(canonicalRequest, "\n")
+ canonicalRequest = append(canonicalRequest, payload)
+
+ _canonicalRequest := strings.Join(canonicalRequest, "")
+
+ var isSecurityToken bool
+ var securityToken []string
+ if securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_OBS]; !isSecurityToken {
+ securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_AMZ]
+ }
+ var query []string
+ if !isSecurityToken {
+ query = strings.Split(queryURL, "&")
+ for _, value := range query {
+ if strings.HasPrefix(value, HEADER_STS_TOKEN_AMZ+"=") || strings.HasPrefix(value, HEADER_STS_TOKEN_OBS+"=") {
+ if value[len(HEADER_STS_TOKEN_AMZ)+1:] != "" {
+ securityToken = []string{value[len(HEADER_STS_TOKEN_AMZ)+1:]}
+ isSecurityToken = true
+ }
+ }
+ }
+ }
+ logCanonicalRequest := _canonicalRequest
+ if isSecurityToken && len(securityToken) > 0 {
+		logCanonicalRequest = strings.ReplaceAll(logCanonicalRequest, securityToken[0], "******")
+ }
+ doLog(LEVEL_DEBUG, "The v4 auth canonicalRequest:\n%s", logCanonicalRequest)
+
+ stringToSign := make([]string, 0, 7)
+ stringToSign = append(stringToSign, V4_HASH_PREFIX)
+ stringToSign = append(stringToSign, "\n")
+ stringToSign = append(stringToSign, longDate)
+ stringToSign = append(stringToSign, "\n")
+ stringToSign = append(stringToSign, scope)
+ stringToSign = append(stringToSign, "\n")
+ stringToSign = append(stringToSign, HexSha256([]byte(_canonicalRequest)))
+
+ _stringToSign := strings.Join(stringToSign, "")
+
+ doLog(LEVEL_DEBUG, "The v4 auth stringToSign:\n%s", _stringToSign)
+ return _stringToSign
+}
+
+func getSignedHeaders(headers map[string][]string) ([]string, map[string][]string) {
+ length := len(headers)
+ _headers := make(map[string][]string, length)
+ signedHeaders := make([]string, 0, length)
+ for key, value := range headers {
+ _key := strings.ToLower(strings.TrimSpace(key))
+ if _key != "" {
+ signedHeaders = append(signedHeaders, _key)
+ _headers[_key] = value
+ } else {
+ delete(headers, key)
+ }
+ }
+ sort.Strings(signedHeaders)
+ return signedHeaders, _headers
+}
+
+func getSignature(stringToSign, sk, region, shortDate string) string {
+ key := HmacSha256([]byte(V4_HASH_PRE+sk), []byte(shortDate))
+ key = HmacSha256(key, []byte(region))
+ key = HmacSha256(key, []byte(V4_SERVICE_NAME))
+ key = HmacSha256(key, []byte(V4_SERVICE_SUFFIX))
+ return Hex(HmacSha256(key, []byte(stringToSign)))
+}
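+
+// getSignature mirrors the standard Signature V4 key-derivation chain
+// (sketch, assuming V4_HASH_PRE is "AWS4"):
+//
+//	kDate     = HMAC-SHA256("AWS4"+sk, shortDate)
+//	kRegion   = HMAC-SHA256(kDate, region)
+//	kService  = HMAC-SHA256(kRegion, V4_SERVICE_NAME)
+//	kSigning  = HMAC-SHA256(kService, V4_SERVICE_SUFFIX)
+//	signature = Hex(HMAC-SHA256(kSigning, stringToSign))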
+
+// V4Auth is a wrapper for v4Auth
+func V4Auth(ak, sk, region, method, canonicalizedURL, queryURL string, headers map[string][]string) map[string]string {
+ return v4Auth(ak, sk, region, method, canonicalizedURL, queryURL, headers)
+}
+
+func v4Auth(ak, sk, region, method, canonicalizedURL, queryURL string, headers map[string][]string) map[string]string {
+ var t time.Time
+ if val, ok := headers[HEADER_DATE_AMZ]; ok {
+ var err error
+ t, err = time.Parse(LONG_DATE_FORMAT, val[0])
+ if err != nil {
+ t = time.Now().UTC()
+ }
+ } else if val, ok := headers[PARAM_DATE_AMZ_CAMEL]; ok {
+ var err error
+ t, err = time.Parse(LONG_DATE_FORMAT, val[0])
+ if err != nil {
+ t = time.Now().UTC()
+ }
+ } else if val, ok := headers[HEADER_DATE_CAMEL]; ok {
+ var err error
+ t, err = time.Parse(RFC1123_FORMAT, val[0])
+ if err != nil {
+ t = time.Now().UTC()
+ }
+ } else if val, ok := headers[strings.ToLower(HEADER_DATE_CAMEL)]; ok {
+ var err error
+ t, err = time.Parse(RFC1123_FORMAT, val[0])
+ if err != nil {
+ t = time.Now().UTC()
+ }
+ } else {
+ t = time.Now().UTC()
+ }
+ shortDate := t.Format(SHORT_DATE_FORMAT)
+ longDate := t.Format(LONG_DATE_FORMAT)
+
+ signedHeaders, _headers := getSignedHeaders(headers)
+
+ credential, scope := getCredential(ak, region, shortDate)
+
+ payload := UNSIGNED_PAYLOAD
+ if val, ok := headers[HEADER_CONTENT_SHA256_AMZ]; ok {
+ payload = val[0]
+ }
+ stringToSign := getV4StringToSign(method, canonicalizedURL, queryURL, scope, longDate, payload, signedHeaders, _headers)
+
+ signature := getSignature(stringToSign, sk, region, shortDate)
+
+ ret := make(map[string]string, 3)
+ ret["Credential"] = credential
+ ret["SignedHeaders"] = strings.Join(signedHeaders, ";")
+ ret["Signature"] = signature
+ return ret
+}
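+
+// Usage sketch for the exported wrapper (all values are placeholders):
+//
+//	headers := map[string][]string{"Host": {"bucket.obs.example.com"}}
+//	ret := V4Auth("ak", "sk", "cn-north-1", "GET", "/bucket/key", "", headers)
+//	auth := fmt.Sprintf("%s Credential=%s,SignedHeaders=%s,Signature=%s",
+//		V4_HASH_PREFIX, ret["Credential"], ret["SignedHeaders"], ret["Signature"])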
diff --git a/modules/obs/client.go b/modules/obs/client.go
new file mode 100755
index 000000000..731f9f465
--- /dev/null
+++ b/modules/obs/client.go
@@ -0,0 +1,1307 @@
+// Copyright 2019 Huawei Technologies Co.,Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+//nolint:golint, unused
+package obs
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "sort"
+ "strings"
+)
+
+// ObsClient defines the OBS client.
+type ObsClient struct {
+ conf *config
+ httpClient *http.Client
+}
+
+// New creates a new ObsClient instance.
+func New(ak, sk, endpoint string, configurers ...configurer) (*ObsClient, error) {
+ conf := &config{securityProvider: &securityProvider{ak: ak, sk: sk}, endpoint: endpoint}
+ conf.maxRetryCount = -1
+ conf.maxRedirectCount = -1
+ for _, configurer := range configurers {
+ configurer(conf)
+ }
+
+ if err := conf.initConfigWithDefault(); err != nil {
+ return nil, err
+ }
+ err := conf.getTransport()
+ if err != nil {
+ return nil, err
+ }
+
+ if isWarnLogEnabled() {
+ info := make([]string, 3)
+ info[0] = fmt.Sprintf("[OBS SDK Version=%s", obsSdkVersion)
+ info[1] = fmt.Sprintf("Endpoint=%s", conf.endpoint)
+ accessMode := "Virtual Hosting"
+ if conf.pathStyle {
+ accessMode = "Path"
+ }
+ info[2] = fmt.Sprintf("Access Mode=%s]", accessMode)
+ doLog(LEVEL_WARN, strings.Join(info, "];["))
+ }
+ doLog(LEVEL_DEBUG, "Create obsclient with config:\n%s\n", conf)
+ obsClient := &ObsClient{conf: conf, httpClient: &http.Client{Transport: conf.transport, CheckRedirect: checkRedirectFunc}}
+ return obsClient, nil
+}
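+
+// Minimal construction sketch (endpoint and credentials are placeholders):
+//
+//	client, err := New("your-ak", "your-sk", "https://obs.cn-north-1.example.com")
+//	if err != nil {
+//		// handle configuration/transport error
+//	}
+//	defer client.Close()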
+
+// Refresh refreshes ak, sk and securityToken for obsClient.
+func (obsClient ObsClient) Refresh(ak, sk, securityToken string) {
+ sp := &securityProvider{ak: strings.TrimSpace(ak), sk: strings.TrimSpace(sk), securityToken: strings.TrimSpace(securityToken)}
+ obsClient.conf.securityProvider = sp
+}
+
+// Close closes ObsClient.
+func (obsClient ObsClient) Close() {
+ obsClient.httpClient = nil
+ obsClient.conf.transport.CloseIdleConnections()
+ obsClient.conf = nil
+}
+
+// ListBuckets lists buckets.
+//
+// You can use this API to obtain the bucket list. In the list, bucket names are displayed in lexicographical order.
+func (obsClient ObsClient) ListBuckets(input *ListBucketsInput, extensions ...extensionOptions) (output *ListBucketsOutput, err error) {
+ if input == nil {
+ input = &ListBucketsInput{}
+ }
+ output = &ListBucketsOutput{}
+ err = obsClient.doActionWithoutBucket("ListBuckets", HTTP_GET, input, output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// CreateBucket creates a bucket.
+//
+// You can use this API to create a bucket and name it as you specify. The created bucket name must be unique in OBS.
+func (obsClient ObsClient) CreateBucket(input *CreateBucketInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+ if input == nil {
+ return nil, errors.New("CreateBucketInput is nil")
+ }
+ output = &BaseModel{}
+ err = obsClient.doActionWithBucket("CreateBucket", HTTP_PUT, input.Bucket, input, output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// DeleteBucket deletes a bucket.
+//
+// You can use this API to delete a bucket. The bucket to be deleted must be empty
+// (containing no objects, noncurrent object versions, or part fragments).
+func (obsClient ObsClient) DeleteBucket(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doActionWithBucket("DeleteBucket", HTTP_DELETE, bucketName, defaultSerializable, output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// SetBucketStoragePolicy sets the bucket storage class.
+//
+// You can use this API to set the storage class for a bucket.
+func (obsClient ObsClient) SetBucketStoragePolicy(input *SetBucketStoragePolicyInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+ if input == nil {
+ return nil, errors.New("SetBucketStoragePolicyInput is nil")
+ }
+ output = &BaseModel{}
+ err = obsClient.doActionWithBucket("SetBucketStoragePolicy", HTTP_PUT, input.Bucket, input, output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) getBucketStoragePolicyS3(bucketName string, extensions []extensionOptions) (output *GetBucketStoragePolicyOutput, err error) {
+	output = &GetBucketStoragePolicyOutput{}
+	outputS3 := &getBucketStoragePolicyOutputS3{}
+ err = obsClient.doActionWithBucket("GetBucketStoragePolicy", HTTP_GET, bucketName, newSubResourceSerial(SubResourceStoragePolicy), outputS3, extensions)
+ if err != nil {
+ output = nil
+ return
+ }
+ output.BaseModel = outputS3.BaseModel
+ output.StorageClass = fmt.Sprintf("%s", outputS3.StorageClass)
+ return
+}
+
+func (obsClient ObsClient) getBucketStoragePolicyObs(bucketName string, extensions []extensionOptions) (output *GetBucketStoragePolicyOutput, err error) {
+ output = &GetBucketStoragePolicyOutput{}
+	outputObs := &getBucketStoragePolicyOutputObs{}
+ err = obsClient.doActionWithBucket("GetBucketStoragePolicy", HTTP_GET, bucketName, newSubResourceSerial(SubResourceStorageClass), outputObs, extensions)
+ if err != nil {
+ output = nil
+ return
+ }
+ output.BaseModel = outputObs.BaseModel
+ output.StorageClass = outputObs.StorageClass
+ return
+}
+
+// GetBucketStoragePolicy gets the bucket storage class.
+//
+// You can use this API to obtain the storage class of a bucket.
+func (obsClient ObsClient) GetBucketStoragePolicy(bucketName string, extensions ...extensionOptions) (output *GetBucketStoragePolicyOutput, err error) {
+ if obsClient.conf.signature == SignatureObs {
+ return obsClient.getBucketStoragePolicyObs(bucketName, extensions)
+ }
+ return obsClient.getBucketStoragePolicyS3(bucketName, extensions)
+}
+
+// ListObjects lists objects in a bucket.
+//
+// You can use this API to list objects in a bucket. By default, a maximum of 1000 objects are listed.
+func (obsClient ObsClient) ListObjects(input *ListObjectsInput, extensions ...extensionOptions) (output *ListObjectsOutput, err error) {
+ if input == nil {
+ return nil, errors.New("ListObjectsInput is nil")
+ }
+ output = &ListObjectsOutput{}
+ err = obsClient.doActionWithBucket("ListObjects", HTTP_GET, input.Bucket, input, output, extensions)
+ if err != nil {
+ output = nil
+ } else {
+ if location, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok {
+ output.Location = location[0]
+ }
+ }
+ return
+}
+
+// ListVersions lists versioning objects in a bucket.
+//
+// You can use this API to list versioning objects in a bucket. By default, a maximum of 1000 versioning objects are listed.
+func (obsClient ObsClient) ListVersions(input *ListVersionsInput, extensions ...extensionOptions) (output *ListVersionsOutput, err error) {
+ if input == nil {
+ return nil, errors.New("ListVersionsInput is nil")
+ }
+ output = &ListVersionsOutput{}
+ err = obsClient.doActionWithBucket("ListVersions", HTTP_GET, input.Bucket, input, output, extensions)
+ if err != nil {
+ output = nil
+ } else {
+ if location, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok {
+ output.Location = location[0]
+ }
+ }
+ return
+}
+
+// ListMultipartUploads lists the multipart uploads.
+//
+// You can use this API to list the multipart uploads that are initialized but not combined or aborted in a specified bucket.
+func (obsClient ObsClient) ListMultipartUploads(input *ListMultipartUploadsInput, extensions ...extensionOptions) (output *ListMultipartUploadsOutput, err error) {
+ if input == nil {
+ return nil, errors.New("ListMultipartUploadsInput is nil")
+ }
+ output = &ListMultipartUploadsOutput{}
+ err = obsClient.doActionWithBucket("ListMultipartUploads", HTTP_GET, input.Bucket, input, output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// SetBucketQuota sets the bucket quota.
+//
+// You can use this API to set the bucket quota. A bucket quota must be expressed in bytes and the maximum value is 2^63-1.
+func (obsClient ObsClient) SetBucketQuota(input *SetBucketQuotaInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+ if input == nil {
+ return nil, errors.New("SetBucketQuotaInput is nil")
+ }
+ output = &BaseModel{}
+ err = obsClient.doActionWithBucket("SetBucketQuota", HTTP_PUT, input.Bucket, input, output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetBucketQuota gets the bucket quota.
+//
+// You can use this API to obtain the bucket quota. Value 0 indicates that no upper limit is set for the bucket quota.
+func (obsClient ObsClient) GetBucketQuota(bucketName string, extensions ...extensionOptions) (output *GetBucketQuotaOutput, err error) {
+ output = &GetBucketQuotaOutput{}
+ err = obsClient.doActionWithBucket("GetBucketQuota", HTTP_GET, bucketName, newSubResourceSerial(SubResourceQuota), output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// HeadBucket checks whether a bucket exists.
+//
+// You can use this API to check whether a bucket exists.
+func (obsClient ObsClient) HeadBucket(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doActionWithBucket("HeadBucket", HTTP_HEAD, bucketName, defaultSerializable, output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// HeadObject checks whether an object exists.
+//
+// You can use this API to check whether an object exists.
+func (obsClient ObsClient) HeadObject(input *HeadObjectInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+ if input == nil {
+ return nil, errors.New("HeadObjectInput is nil")
+ }
+ output = &BaseModel{}
+ err = obsClient.doActionWithBucketAndKey("HeadObject", HTTP_HEAD, input.Bucket, input.Key, input, output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetBucketMetadata gets the metadata of a bucket.
+//
+// You can use this API to send a HEAD request to a bucket to obtain the bucket
+// metadata such as the storage class and CORS rules (if set).
+func (obsClient ObsClient) GetBucketMetadata(input *GetBucketMetadataInput, extensions ...extensionOptions) (output *GetBucketMetadataOutput, err error) {
+ output = &GetBucketMetadataOutput{}
+ err = obsClient.doActionWithBucket("GetBucketMetadata", HTTP_HEAD, input.Bucket, input, output, extensions)
+ if err != nil {
+ output = nil
+ } else {
+ ParseGetBucketMetadataOutput(output)
+ }
+ return
+}
+
+// SetObjectMetadata sets object metadata.
+func (obsClient ObsClient) SetObjectMetadata(input *SetObjectMetadataInput, extensions ...extensionOptions) (output *SetObjectMetadataOutput, err error) {
+ output = &SetObjectMetadataOutput{}
+ err = obsClient.doActionWithBucketAndKey("SetObjectMetadata", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
+ if err != nil {
+ output = nil
+ } else {
+ ParseSetObjectMetadataOutput(output)
+ }
+ return
+}
+
+// GetBucketStorageInfo gets storage information about a bucket.
+//
+// You can use this API to obtain storage information about a bucket, including the
+// bucket size and number of objects in the bucket.
+func (obsClient ObsClient) GetBucketStorageInfo(bucketName string, extensions ...extensionOptions) (output *GetBucketStorageInfoOutput, err error) {
+ output = &GetBucketStorageInfoOutput{}
+ err = obsClient.doActionWithBucket("GetBucketStorageInfo", HTTP_GET, bucketName, newSubResourceSerial(SubResourceStorageInfo), output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) getBucketLocationS3(bucketName string, extensions []extensionOptions) (output *GetBucketLocationOutput, err error) {
+ output = &GetBucketLocationOutput{}
+	outputS3 := &getBucketLocationOutputS3{}
+ err = obsClient.doActionWithBucket("GetBucketLocation", HTTP_GET, bucketName, newSubResourceSerial(SubResourceLocation), outputS3, extensions)
+ if err != nil {
+ output = nil
+ } else {
+ output.BaseModel = outputS3.BaseModel
+ output.Location = outputS3.Location
+ }
+ return
+}
+
+func (obsClient ObsClient) getBucketLocationObs(bucketName string, extensions []extensionOptions) (output *GetBucketLocationOutput, err error) {
+	output = &GetBucketLocationOutput{}
+	outputObs := &getBucketLocationOutputObs{}
+ err = obsClient.doActionWithBucket("GetBucketLocation", HTTP_GET, bucketName, newSubResourceSerial(SubResourceLocation), outputObs, extensions)
+ if err != nil {
+ output = nil
+ } else {
+ output.BaseModel = outputObs.BaseModel
+ output.Location = outputObs.Location
+ }
+ return
+}
+
+// GetBucketLocation gets the location of a bucket.
+//
+// You can use this API to obtain the bucket location.
+func (obsClient ObsClient) GetBucketLocation(bucketName string, extensions ...extensionOptions) (output *GetBucketLocationOutput, err error) {
+ if obsClient.conf.signature == SignatureObs {
+ return obsClient.getBucketLocationObs(bucketName, extensions)
+ }
+ return obsClient.getBucketLocationS3(bucketName, extensions)
+}
+
+// SetBucketAcl sets the bucket ACL.
+//
+// You can use this API to set the ACL for a bucket.
+func (obsClient ObsClient) SetBucketAcl(input *SetBucketAclInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+ if input == nil {
+ return nil, errors.New("SetBucketAclInput is nil")
+ }
+ output = &BaseModel{}
+ err = obsClient.doActionWithBucket("SetBucketAcl", HTTP_PUT, input.Bucket, input, output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) getBucketACLObs(bucketName string, extensions []extensionOptions) (output *GetBucketAclOutput, err error) {
+	output = &GetBucketAclOutput{}
+	outputObs := &getBucketACLOutputObs{}
+ err = obsClient.doActionWithBucket("GetBucketAcl", HTTP_GET, bucketName, newSubResourceSerial(SubResourceAcl), outputObs, extensions)
+ if err != nil {
+ output = nil
+ } else {
+ output.BaseModel = outputObs.BaseModel
+ output.Owner = outputObs.Owner
+ output.Grants = make([]Grant, 0, len(outputObs.Grants))
+ for _, valGrant := range outputObs.Grants {
+ tempOutput := Grant{}
+ tempOutput.Delivered = valGrant.Delivered
+ tempOutput.Permission = valGrant.Permission
+ tempOutput.Grantee.DisplayName = valGrant.Grantee.DisplayName
+ tempOutput.Grantee.ID = valGrant.Grantee.ID
+ tempOutput.Grantee.Type = valGrant.Grantee.Type
+ tempOutput.Grantee.URI = GroupAllUsers
+
+ output.Grants = append(output.Grants, tempOutput)
+ }
+ }
+ return
+}
+
+// GetBucketAcl gets the bucket ACL.
+//
+// You can use this API to obtain a bucket ACL.
+func (obsClient ObsClient) GetBucketAcl(bucketName string, extensions ...extensionOptions) (output *GetBucketAclOutput, err error) {
+ output = &GetBucketAclOutput{}
+ if obsClient.conf.signature == SignatureObs {
+ return obsClient.getBucketACLObs(bucketName, extensions)
+ }
+ err = obsClient.doActionWithBucket("GetBucketAcl", HTTP_GET, bucketName, newSubResourceSerial(SubResourceAcl), output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// SetBucketPolicy sets the bucket policy.
+//
+// You can use this API to set a bucket policy. If the bucket already has a policy, the
+// policy will be overwritten by the one specified in this request.
+func (obsClient ObsClient) SetBucketPolicy(input *SetBucketPolicyInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+ if input == nil {
+ return nil, errors.New("SetBucketPolicy is nil")
+ }
+ output = &BaseModel{}
+ err = obsClient.doActionWithBucket("SetBucketPolicy", HTTP_PUT, input.Bucket, input, output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetBucketPolicy gets the bucket policy.
+//
+// You can use this API to obtain the policy of a bucket.
+func (obsClient ObsClient) GetBucketPolicy(bucketName string, extensions ...extensionOptions) (output *GetBucketPolicyOutput, err error) {
+ output = &GetBucketPolicyOutput{}
+ err = obsClient.doActionWithBucketV2("GetBucketPolicy", HTTP_GET, bucketName, newSubResourceSerial(SubResourcePolicy), output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// DeleteBucketPolicy deletes the bucket policy.
+//
+// You can use this API to delete the policy of a bucket.
+func (obsClient ObsClient) DeleteBucketPolicy(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doActionWithBucket("DeleteBucketPolicy", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourcePolicy), output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// SetBucketCors sets CORS rules for a bucket.
+//
+// You can use this API to set CORS rules for a bucket to allow client browsers to send cross-origin requests.
+func (obsClient ObsClient) SetBucketCors(input *SetBucketCorsInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+ if input == nil {
+ return nil, errors.New("SetBucketCorsInput is nil")
+ }
+ output = &BaseModel{}
+ err = obsClient.doActionWithBucket("SetBucketCors", HTTP_PUT, input.Bucket, input, output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetBucketCors gets CORS rules of a bucket.
+//
+// You can use this API to obtain the CORS rules of a specified bucket.
+func (obsClient ObsClient) GetBucketCors(bucketName string, extensions ...extensionOptions) (output *GetBucketCorsOutput, err error) {
+ output = &GetBucketCorsOutput{}
+ err = obsClient.doActionWithBucket("GetBucketCors", HTTP_GET, bucketName, newSubResourceSerial(SubResourceCors), output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// DeleteBucketCors deletes CORS rules of a bucket.
+//
+// You can use this API to delete the CORS rules of a specified bucket.
+func (obsClient ObsClient) DeleteBucketCors(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doActionWithBucket("DeleteBucketCors", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceCors), output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// SetBucketVersioning sets the versioning status for a bucket.
+//
+// You can use this API to set the versioning status for a bucket.
+func (obsClient ObsClient) SetBucketVersioning(input *SetBucketVersioningInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+ if input == nil {
+ return nil, errors.New("SetBucketVersioningInput is nil")
+ }
+ output = &BaseModel{}
+ err = obsClient.doActionWithBucket("SetBucketVersioning", HTTP_PUT, input.Bucket, input, output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetBucketVersioning gets the versioning status of a bucket.
+//
+// You can use this API to obtain the versioning status of a bucket.
+func (obsClient ObsClient) GetBucketVersioning(bucketName string, extensions ...extensionOptions) (output *GetBucketVersioningOutput, err error) {
+ output = &GetBucketVersioningOutput{}
+ err = obsClient.doActionWithBucket("GetBucketVersioning", HTTP_GET, bucketName, newSubResourceSerial(SubResourceVersioning), output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// SetBucketWebsiteConfiguration sets website hosting for a bucket.
+//
+// You can use this API to set website hosting for a bucket.
+func (obsClient ObsClient) SetBucketWebsiteConfiguration(input *SetBucketWebsiteConfigurationInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+ if input == nil {
+ return nil, errors.New("SetBucketWebsiteConfigurationInput is nil")
+ }
+ output = &BaseModel{}
+ err = obsClient.doActionWithBucket("SetBucketWebsiteConfiguration", HTTP_PUT, input.Bucket, input, output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetBucketWebsiteConfiguration gets the website hosting settings of a bucket.
+//
+// You can use this API to obtain the website hosting settings of a bucket.
+func (obsClient ObsClient) GetBucketWebsiteConfiguration(bucketName string, extensions ...extensionOptions) (output *GetBucketWebsiteConfigurationOutput, err error) {
+ output = &GetBucketWebsiteConfigurationOutput{}
+ err = obsClient.doActionWithBucket("GetBucketWebsiteConfiguration", HTTP_GET, bucketName, newSubResourceSerial(SubResourceWebsite), output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// DeleteBucketWebsiteConfiguration deletes the website hosting settings of a bucket.
+//
+// You can use this API to delete the website hosting settings of a bucket.
+func (obsClient ObsClient) DeleteBucketWebsiteConfiguration(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doActionWithBucket("DeleteBucketWebsiteConfiguration", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceWebsite), output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// SetBucketLoggingConfiguration sets the bucket logging.
+//
+// You can use this API to configure access logging for a bucket.
+func (obsClient ObsClient) SetBucketLoggingConfiguration(input *SetBucketLoggingConfigurationInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+ if input == nil {
+ return nil, errors.New("SetBucketLoggingConfigurationInput is nil")
+ }
+ output = &BaseModel{}
+ err = obsClient.doActionWithBucket("SetBucketLoggingConfiguration", HTTP_PUT, input.Bucket, input, output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetBucketLoggingConfiguration gets the logging settings of a bucket.
+//
+// You can use this API to obtain the access logging settings of a bucket.
+func (obsClient ObsClient) GetBucketLoggingConfiguration(bucketName string, extensions ...extensionOptions) (output *GetBucketLoggingConfigurationOutput, err error) {
+ output = &GetBucketLoggingConfigurationOutput{}
+ err = obsClient.doActionWithBucket("GetBucketLoggingConfiguration", HTTP_GET, bucketName, newSubResourceSerial(SubResourceLogging), output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// SetBucketLifecycleConfiguration sets lifecycle rules for a bucket.
+//
+// You can use this API to set lifecycle rules for a bucket, to periodically transition
+// object storage classes and delete objects in the bucket.
+func (obsClient ObsClient) SetBucketLifecycleConfiguration(input *SetBucketLifecycleConfigurationInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+ if input == nil {
+ return nil, errors.New("SetBucketLifecycleConfigurationInput is nil")
+ }
+ output = &BaseModel{}
+ err = obsClient.doActionWithBucket("SetBucketLifecycleConfiguration", HTTP_PUT, input.Bucket, input, output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetBucketLifecycleConfiguration gets lifecycle rules of a bucket.
+//
+// You can use this API to obtain the lifecycle rules of a bucket.
+func (obsClient ObsClient) GetBucketLifecycleConfiguration(bucketName string, extensions ...extensionOptions) (output *GetBucketLifecycleConfigurationOutput, err error) {
+ output = &GetBucketLifecycleConfigurationOutput{}
+ err = obsClient.doActionWithBucket("GetBucketLifecycleConfiguration", HTTP_GET, bucketName, newSubResourceSerial(SubResourceLifecycle), output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// DeleteBucketLifecycleConfiguration deletes lifecycle rules of a bucket.
+//
+// You can use this API to delete all lifecycle rules of a bucket.
+func (obsClient ObsClient) DeleteBucketLifecycleConfiguration(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doActionWithBucket("DeleteBucketLifecycleConfiguration", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceLifecycle), output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// SetBucketTagging sets bucket tags.
+//
+// You can use this API to set bucket tags.
+func (obsClient ObsClient) SetBucketTagging(input *SetBucketTaggingInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+ if input == nil {
+ return nil, errors.New("SetBucketTaggingInput is nil")
+ }
+ output = &BaseModel{}
+ err = obsClient.doActionWithBucket("SetBucketTagging", HTTP_PUT, input.Bucket, input, output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetBucketTagging gets bucket tags.
+//
+// You can use this API to obtain the tags of a specified bucket.
+func (obsClient ObsClient) GetBucketTagging(bucketName string, extensions ...extensionOptions) (output *GetBucketTaggingOutput, err error) {
+ output = &GetBucketTaggingOutput{}
+ err = obsClient.doActionWithBucket("GetBucketTagging", HTTP_GET, bucketName, newSubResourceSerial(SubResourceTagging), output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// DeleteBucketTagging deletes bucket tags.
+//
+// You can use this API to delete the tags of a specified bucket.
+func (obsClient ObsClient) DeleteBucketTagging(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doActionWithBucket("DeleteBucketTagging", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceTagging), output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// SetBucketNotification sets event notification for a bucket.
+//
+// You can use this API to configure event notification for a bucket. You will be notified of all
+// specified operations performed on the bucket.
+func (obsClient ObsClient) SetBucketNotification(input *SetBucketNotificationInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+ if input == nil {
+ return nil, errors.New("SetBucketNotificationInput is nil")
+ }
+ output = &BaseModel{}
+ err = obsClient.doActionWithBucket("SetBucketNotification", HTTP_PUT, input.Bucket, input, output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetBucketNotification gets event notification settings of a bucket.
+//
+// You can use this API to obtain the event notification configuration of a bucket.
+func (obsClient ObsClient) GetBucketNotification(bucketName string, extensions ...extensionOptions) (output *GetBucketNotificationOutput, err error) {
+ if obsClient.conf.signature != SignatureObs {
+ return obsClient.getBucketNotificationS3(bucketName, extensions)
+ }
+ output = &GetBucketNotificationOutput{}
+ err = obsClient.doActionWithBucket("GetBucketNotification", HTTP_GET, bucketName, newSubResourceSerial(SubResourceNotification), output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) getBucketNotificationS3(bucketName string, extensions []extensionOptions) (output *GetBucketNotificationOutput, err error) {
+ outputS3 := &getBucketNotificationOutputS3{}
+ err = obsClient.doActionWithBucket("GetBucketNotification", HTTP_GET, bucketName, newSubResourceSerial(SubResourceNotification), outputS3, extensions)
+ if err != nil {
+ return nil, err
+ }
+
+ output = &GetBucketNotificationOutput{}
+ output.BaseModel = outputS3.BaseModel
+ topicConfigurations := make([]TopicConfiguration, 0, len(outputS3.TopicConfigurations))
+ for _, topicConfigurationS3 := range outputS3.TopicConfigurations {
+ topicConfiguration := TopicConfiguration{}
+ topicConfiguration.ID = topicConfigurationS3.ID
+ topicConfiguration.Topic = topicConfigurationS3.Topic
+ topicConfiguration.FilterRules = topicConfigurationS3.FilterRules
+
+ events := make([]EventType, 0, len(topicConfigurationS3.Events))
+ for _, event := range topicConfigurationS3.Events {
+ events = append(events, ParseStringToEventType(event))
+ }
+ topicConfiguration.Events = events
+ topicConfigurations = append(topicConfigurations, topicConfiguration)
+ }
+ output.TopicConfigurations = topicConfigurations
+ return
+}
+
+// DeleteObject deletes an object.
+//
+// You can use this API to delete an object from a specified bucket.
+func (obsClient ObsClient) DeleteObject(input *DeleteObjectInput, extensions ...extensionOptions) (output *DeleteObjectOutput, err error) {
+ if input == nil {
+ return nil, errors.New("DeleteObjectInput is nil")
+ }
+ output = &DeleteObjectOutput{}
+ err = obsClient.doActionWithBucketAndKey("DeleteObject", HTTP_DELETE, input.Bucket, input.Key, input, output, extensions)
+ if err != nil {
+ output = nil
+ } else {
+ ParseDeleteObjectOutput(output)
+ }
+ return
+}
+
+// DeleteObjects deletes objects in a batch.
+//
+// You can use this API to batch delete objects from a specified bucket.
+func (obsClient ObsClient) DeleteObjects(input *DeleteObjectsInput, extensions ...extensionOptions) (output *DeleteObjectsOutput, err error) {
+ if input == nil {
+ return nil, errors.New("DeleteObjectsInput is nil")
+ }
+ output = &DeleteObjectsOutput{}
+ err = obsClient.doActionWithBucket("DeleteObjects", HTTP_POST, input.Bucket, input, output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// SetObjectAcl sets ACL for an object.
+//
+// You can use this API to set the ACL for an object in a specified bucket.
+func (obsClient ObsClient) SetObjectAcl(input *SetObjectAclInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+ if input == nil {
+ return nil, errors.New("SetObjectAclInput is nil")
+ }
+ output = &BaseModel{}
+ err = obsClient.doActionWithBucketAndKey("SetObjectAcl", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetObjectAcl gets the ACL of an object.
+//
+// You can use this API to obtain the ACL of an object in a specified bucket.
+func (obsClient ObsClient) GetObjectAcl(input *GetObjectAclInput, extensions ...extensionOptions) (output *GetObjectAclOutput, err error) {
+ if input == nil {
+ return nil, errors.New("GetObjectAclInput is nil")
+ }
+ output = &GetObjectAclOutput{}
+ err = obsClient.doActionWithBucketAndKey("GetObjectAcl", HTTP_GET, input.Bucket, input.Key, input, output, extensions)
+ if err != nil {
+ output = nil
+ } else {
+ if versionID, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok {
+ output.VersionId = versionID[0]
+ }
+ }
+ return
+}
+
+// RestoreObject restores an object.
+func (obsClient ObsClient) RestoreObject(input *RestoreObjectInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+ if input == nil {
+ return nil, errors.New("RestoreObjectInput is nil")
+ }
+ output = &BaseModel{}
+ err = obsClient.doActionWithBucketAndKey("RestoreObject", HTTP_POST, input.Bucket, input.Key, input, output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetObjectMetadata gets object metadata.
+//
+// You can use this API to send a HEAD request to the object of a specified bucket to obtain its metadata.
+func (obsClient ObsClient) GetObjectMetadata(input *GetObjectMetadataInput, extensions ...extensionOptions) (output *GetObjectMetadataOutput, err error) {
+ if input == nil {
+ return nil, errors.New("GetObjectMetadataInput is nil")
+ }
+ output = &GetObjectMetadataOutput{}
+ err = obsClient.doActionWithBucketAndKey("GetObjectMetadata", HTTP_HEAD, input.Bucket, input.Key, input, output, extensions)
+ if err != nil {
+ output = nil
+ } else {
+ ParseGetObjectMetadataOutput(output)
+ }
+ return
+}
+
+// GetObject downloads an object.
+//
+// You can use this API to download an object in a specified bucket.
+func (obsClient ObsClient) GetObject(input *GetObjectInput, extensions ...extensionOptions) (output *GetObjectOutput, err error) {
+ if input == nil {
+ return nil, errors.New("GetObjectInput is nil")
+ }
+ output = &GetObjectOutput{}
+ err = obsClient.doActionWithBucketAndKey("GetObject", HTTP_GET, input.Bucket, input.Key, input, output, extensions)
+ if err != nil {
+ output = nil
+ } else {
+ ParseGetObjectOutput(output)
+ }
+ return
+}
+
+// PutObject uploads an object to the specified bucket.
+func (obsClient ObsClient) PutObject(input *PutObjectInput, extensions ...extensionOptions) (output *PutObjectOutput, err error) {
+ if input == nil {
+ return nil, errors.New("PutObjectInput is nil")
+ }
+
+ if input.ContentType == "" && input.Key != "" {
+ if contentType, ok := mimeTypes[strings.ToLower(input.Key[strings.LastIndex(input.Key, ".")+1:])]; ok {
+ input.ContentType = contentType
+ }
+ }
+ output = &PutObjectOutput{}
+ var repeatable bool
+ if input.Body != nil {
+ _, repeatable = input.Body.(*strings.Reader)
+ if input.ContentLength > 0 {
+ input.Body = &readerWrapper{reader: input.Body, totalCount: input.ContentLength}
+ }
+ }
+ if repeatable {
+ err = obsClient.doActionWithBucketAndKey("PutObject", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
+ } else {
+ err = obsClient.doActionWithBucketAndKeyUnRepeatable("PutObject", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
+ }
+ if err != nil {
+ output = nil
+ } else {
+ ParsePutObjectOutput(output)
+ }
+ return
+}
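+
+// Usage sketch (bucket and key are placeholders). Note that only a
+// *strings.Reader body is treated as repeatable above, so other readers go
+// through the unrepeatable (non-retrying) path:
+//
+//	input := &PutObjectInput{}
+//	input.Bucket = "my-bucket"
+//	input.Key = "dir/hello.txt"
+//	input.Body = strings.NewReader("hello")
+//	output, err := client.PutObject(input)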
+
+func (obsClient ObsClient) getContentType(input *PutObjectInput, sourceFile string) (contentType string) {
+ if contentType, ok := mimeTypes[strings.ToLower(input.Key[strings.LastIndex(input.Key, ".")+1:])]; ok {
+ return contentType
+ }
+ if contentType, ok := mimeTypes[strings.ToLower(sourceFile[strings.LastIndex(sourceFile, ".")+1:])]; ok {
+ return contentType
+ }
+ return
+}
+
+func (obsClient ObsClient) isGetContentType(input *PutObjectInput) bool {
+	return input.ContentType == "" && input.Key != ""
+}
+
+// PutFile uploads a file to the specified bucket.
+func (obsClient ObsClient) PutFile(input *PutFileInput, extensions ...extensionOptions) (output *PutObjectOutput, err error) {
+ if input == nil {
+ return nil, errors.New("PutFileInput is nil")
+ }
+
+ var body io.Reader
+ sourceFile := strings.TrimSpace(input.SourceFile)
+ if sourceFile != "" {
+ fd, _err := os.Open(sourceFile)
+ if _err != nil {
+ err = _err
+ return nil, err
+ }
+ defer func() {
+ errMsg := fd.Close()
+ if errMsg != nil {
+ doLog(LEVEL_WARN, "Failed to close file with reason: %v", errMsg)
+ }
+ }()
+
+ stat, _err := fd.Stat()
+ if _err != nil {
+ err = _err
+ return nil, err
+ }
+ fileReaderWrapper := &fileReaderWrapper{filePath: sourceFile}
+ fileReaderWrapper.reader = fd
+ if input.ContentLength > 0 {
+ if input.ContentLength > stat.Size() {
+ input.ContentLength = stat.Size()
+ }
+ fileReaderWrapper.totalCount = input.ContentLength
+ } else {
+ fileReaderWrapper.totalCount = stat.Size()
+ }
+ body = fileReaderWrapper
+ }
+
+ _input := &PutObjectInput{}
+ _input.PutObjectBasicInput = input.PutObjectBasicInput
+ _input.Body = body
+
+ if obsClient.isGetContentType(_input) {
+ _input.ContentType = obsClient.getContentType(_input, sourceFile)
+ }
+
+ output = &PutObjectOutput{}
+ err = obsClient.doActionWithBucketAndKey("PutFile", HTTP_PUT, _input.Bucket, _input.Key, _input, output, extensions)
+ if err != nil {
+ output = nil
+ } else {
+ ParsePutObjectOutput(output)
+ }
+ return
+}
+
+// CopyObject creates a copy for an existing object.
+//
+// You can use this API to create a copy for an object in a specified bucket.
+func (obsClient ObsClient) CopyObject(input *CopyObjectInput, extensions ...extensionOptions) (output *CopyObjectOutput, err error) {
+ if input == nil {
+ return nil, errors.New("CopyObjectInput is nil")
+ }
+
+ if strings.TrimSpace(input.CopySourceBucket) == "" {
+ return nil, errors.New("Source bucket is empty")
+ }
+ if strings.TrimSpace(input.CopySourceKey) == "" {
+ return nil, errors.New("Source key is empty")
+ }
+
+ output = &CopyObjectOutput{}
+ err = obsClient.doActionWithBucketAndKey("CopyObject", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
+ if err != nil {
+ output = nil
+ } else {
+ ParseCopyObjectOutput(output)
+ }
+ return
+}
+
+// AbortMultipartUpload aborts a multipart upload in a specified bucket by using the multipart upload ID.
+func (obsClient ObsClient) AbortMultipartUpload(input *AbortMultipartUploadInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+ if input == nil {
+ return nil, errors.New("AbortMultipartUploadInput is nil")
+ }
+ if input.UploadId == "" {
+ return nil, errors.New("UploadId is empty")
+ }
+ output = &BaseModel{}
+ err = obsClient.doActionWithBucketAndKey("AbortMultipartUpload", HTTP_DELETE, input.Bucket, input.Key, input, output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// InitiateMultipartUpload initializes a multipart upload.
+func (obsClient ObsClient) InitiateMultipartUpload(input *InitiateMultipartUploadInput, extensions ...extensionOptions) (output *InitiateMultipartUploadOutput, err error) {
+ if input == nil {
+ return nil, errors.New("InitiateMultipartUploadInput is nil")
+ }
+
+ if input.ContentType == "" && input.Key != "" {
+ if contentType, ok := mimeTypes[strings.ToLower(input.Key[strings.LastIndex(input.Key, ".")+1:])]; ok {
+ input.ContentType = contentType
+ }
+ }
+
+ output = &InitiateMultipartUploadOutput{}
+ err = obsClient.doActionWithBucketAndKey("InitiateMultipartUpload", HTTP_POST, input.Bucket, input.Key, input, output, extensions)
+ if err != nil {
+ output = nil
+ } else {
+ ParseInitiateMultipartUploadOutput(output)
+ }
+ return
+}
+
+// UploadPart uploads a part to a specified bucket by using a specified multipart upload ID.
+//
+// After a multipart upload is initialized, you can use this API to upload a part to a specified bucket
+// by using the multipart upload ID. Except for the last part, whose size can range from 0 to 5 GB,
+// each part must be between 100 KB and 5 GB in size. Part numbers range from 1 to 10000.
+func (obsClient ObsClient) UploadPart(_input *UploadPartInput, extensions ...extensionOptions) (output *UploadPartOutput, err error) {
+ if _input == nil {
+ return nil, errors.New("UploadPartInput is nil")
+ }
+
+ if _input.UploadId == "" {
+ return nil, errors.New("UploadId is empty")
+ }
+
+ input := &UploadPartInput{}
+ input.Bucket = _input.Bucket
+ input.Key = _input.Key
+ input.PartNumber = _input.PartNumber
+ input.UploadId = _input.UploadId
+ input.ContentMD5 = _input.ContentMD5
+ input.SourceFile = _input.SourceFile
+ input.Offset = _input.Offset
+ input.PartSize = _input.PartSize
+ input.SseHeader = _input.SseHeader
+ input.Body = _input.Body
+
+ output = &UploadPartOutput{}
+ var repeatable bool
+ if input.Body != nil {
+ _, repeatable = input.Body.(*strings.Reader)
+ if _, ok := input.Body.(*readerWrapper); !ok && input.PartSize > 0 {
+ input.Body = &readerWrapper{reader: input.Body, totalCount: input.PartSize}
+ }
+ } else if sourceFile := strings.TrimSpace(input.SourceFile); sourceFile != "" {
+ fd, _err := os.Open(sourceFile)
+ if _err != nil {
+ err = _err
+ return nil, err
+ }
+ defer func() {
+ errMsg := fd.Close()
+ if errMsg != nil {
+ doLog(LEVEL_WARN, "Failed to close file with reason: %v", errMsg)
+ }
+ }()
+
+ stat, _err := fd.Stat()
+ if _err != nil {
+ err = _err
+ return nil, err
+ }
+ fileSize := stat.Size()
+ fileReaderWrapper := &fileReaderWrapper{filePath: sourceFile}
+ fileReaderWrapper.reader = fd
+
+ if input.Offset < 0 || input.Offset > fileSize {
+ input.Offset = 0
+ }
+
+ if input.PartSize <= 0 || input.PartSize > (fileSize-input.Offset) {
+ input.PartSize = fileSize - input.Offset
+ }
+ fileReaderWrapper.totalCount = input.PartSize
+ if _, err = fd.Seek(input.Offset, io.SeekStart); err != nil {
+ return nil, err
+ }
+ input.Body = fileReaderWrapper
+ repeatable = true
+ }
+ if repeatable {
+ err = obsClient.doActionWithBucketAndKey("UploadPart", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
+ } else {
+ err = obsClient.doActionWithBucketAndKeyUnRepeatable("UploadPart", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
+ }
+ if err != nil {
+ output = nil
+ } else {
+ ParseUploadPartOutput(output)
+ output.PartNumber = input.PartNumber
+ }
+ return
+}
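+
+// A sketch of uploading a single part from a local file (illustrative; "client" and the
+// paths are placeholders, and uploadId comes from InitiateMultipartUpload). PartSize is
+// clamped to the remaining file size by the function above:
+//
+//	part := &UploadPartInput{}
+//	part.Bucket, part.Key = "my-bucket", "docs/report.pdf"
+//	part.UploadId = uploadId
+//	part.PartNumber = 1
+//	part.SourceFile = "/path/to/report.pdf"
+//	part.Offset = 0
+//	part.PartSize = 5 * 1024 * 1024
+//	out, err := client.UploadPart(part)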
+
+// CompleteMultipartUpload combines the uploaded parts in a specified bucket by using the multipart upload ID.
+func (obsClient ObsClient) CompleteMultipartUpload(input *CompleteMultipartUploadInput, extensions ...extensionOptions) (output *CompleteMultipartUploadOutput, err error) {
+ if input == nil {
+ return nil, errors.New("CompleteMultipartUploadInput is nil")
+ }
+
+ if input.UploadId == "" {
+ return nil, errors.New("UploadId is empty")
+ }
+
+ var parts partSlice = input.Parts
+ sort.Sort(parts)
+
+ output = &CompleteMultipartUploadOutput{}
+ err = obsClient.doActionWithBucketAndKey("CompleteMultipartUpload", HTTP_POST, input.Bucket, input.Key, input, output, extensions)
+ if err != nil {
+ output = nil
+ } else {
+ ParseCompleteMultipartUploadOutput(output)
+ }
+ return
+}
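+
+// Completing the upload (illustrative sketch; the Part struct with PartNumber and ETag
+// fields is assumed from this SDK's model types, with each ETag taken from the
+// corresponding UploadPart response):
+//
+//	complete := &CompleteMultipartUploadInput{}
+//	complete.Bucket, complete.Key = "my-bucket", "docs/report.pdf"
+//	complete.UploadId = uploadId
+//	complete.Parts = []Part{{PartNumber: 1, ETag: etag1}, {PartNumber: 2, ETag: etag2}}
+//	_, err := client.CompleteMultipartUpload(complete)
+//
+// The parts are sorted by the function itself, so they may be appended in any order.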
+
+// ListParts lists the uploaded parts in a bucket by using the multipart upload ID.
+func (obsClient ObsClient) ListParts(input *ListPartsInput, extensions ...extensionOptions) (output *ListPartsOutput, err error) {
+ if input == nil {
+ return nil, errors.New("ListPartsInput is nil")
+ }
+ if input.UploadId == "" {
+ return nil, errors.New("UploadId is empty")
+ }
+ output = &ListPartsOutput{}
+ err = obsClient.doActionWithBucketAndKey("ListParts", HTTP_GET, input.Bucket, input.Key, input, output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
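+
+// After a restart, ListParts can rebuild the part list for CompleteMultipartUpload
+// (sketch; the Parts field on the output is assumed from this SDK's model types):
+//
+//	list := &ListPartsInput{}
+//	list.Bucket, list.Key, list.UploadId = "my-bucket", "docs/report.pdf", uploadId
+//	out, err := client.ListParts(list)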
+
+// CopyPart copies a part to a specified bucket by using a specified multipart upload ID.
+//
+// After a multipart upload is initialized, you can use this API to copy a part to a specified bucket by using the multipart upload ID.
+func (obsClient ObsClient) CopyPart(input *CopyPartInput, extensions ...extensionOptions) (output *CopyPartOutput, err error) {
+ if input == nil {
+ return nil, errors.New("CopyPartInput is nil")
+ }
+ if input.UploadId == "" {
+ return nil, errors.New("UploadId is empty")
+ }
+ if strings.TrimSpace(input.CopySourceBucket) == "" {
+ return nil, errors.New("Source bucket is empty")
+ }
+ if strings.TrimSpace(input.CopySourceKey) == "" {
+ return nil, errors.New("Source key is empty")
+ }
+
+ output = &CopyPartOutput{}
+ err = obsClient.doActionWithBucketAndKey("CopyPart", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
+ if err != nil {
+ output = nil
+ } else {
+ ParseCopyPartOutput(output)
+ output.PartNumber = input.PartNumber
+ }
+ return
+}
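+
+// Server-side copy into a part (sketch; only fields validated by the function above are
+// used, and all values are placeholders):
+//
+//	cp := &CopyPartInput{}
+//	cp.Bucket, cp.Key = "dst-bucket", "dst-key"
+//	cp.UploadId, cp.PartNumber = uploadId, 2
+//	cp.CopySourceBucket, cp.CopySourceKey = "src-bucket", "src-key"
+//	out, err := client.CopyPart(cp)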
+
+// SetBucketRequestPayment sets the requester-pays configuration of a bucket.
+func (obsClient ObsClient) SetBucketRequestPayment(input *SetBucketRequestPaymentInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+ if input == nil {
+ return nil, errors.New("SetBucketRequestPaymentInput is nil")
+ }
+ output = &BaseModel{}
+ err = obsClient.doActionWithBucket("SetBucketRequestPayment", HTTP_PUT, input.Bucket, input, output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetBucketRequestPayment gets the requester-pays configuration of a bucket.
+func (obsClient ObsClient) GetBucketRequestPayment(bucketName string, extensions ...extensionOptions) (output *GetBucketRequestPaymentOutput, err error) {
+ output = &GetBucketRequestPaymentOutput{}
+ err = obsClient.doActionWithBucket("GetBucketRequestPayment", HTTP_GET, bucketName, newSubResourceSerial(SubResourceRequestPayment), output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// UploadFile performs a resumable upload.
+//
+// This API is an encapsulated and enhanced version of multipart upload, and aims to eliminate large file
+// upload failures caused by poor network conditions and program breakdowns.
+func (obsClient ObsClient) UploadFile(input *UploadFileInput, extensions ...extensionOptions) (output *CompleteMultipartUploadOutput, err error) {
+ if input == nil {
+ return nil, errors.New("UploadFileInput is nil")
+ }
+
+ if input.EnableCheckpoint && input.CheckpointFile == "" {
+ input.CheckpointFile = input.UploadFile + ".uploadfile_record"
+ }
+
+ if input.TaskNum <= 0 {
+ input.TaskNum = 1
+ }
+ if input.PartSize < MIN_PART_SIZE {
+ input.PartSize = MIN_PART_SIZE
+ } else if input.PartSize > MAX_PART_SIZE {
+ input.PartSize = MAX_PART_SIZE
+ }
+
+ output, err = obsClient.resumeUpload(input, extensions)
+ return
+}
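+
+// Resumable upload sketch (illustrative; Bucket/Key are assumed fields on UploadFileInput,
+// the remaining fields appear in the function above):
+//
+//	up := &UploadFileInput{}
+//	up.Bucket, up.Key = "my-bucket", "backups/db.tar"
+//	up.UploadFile = "/var/backups/db.tar"
+//	up.EnableCheckpoint = true // progress recorded in /var/backups/db.tar.uploadfile_record
+//	up.TaskNum = 4             // number of parts uploaded concurrently
+//	out, err := client.UploadFile(up)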
+
+// DownloadFile performs a resumable download.
+//
+// This API is an encapsulated and enhanced version of partial download, and aims to eliminate large file
+// download failures caused by poor network conditions and program breakdowns.
+func (obsClient ObsClient) DownloadFile(input *DownloadFileInput, extensions ...extensionOptions) (output *GetObjectMetadataOutput, err error) {
+ if input.DownloadFile == "" {
+ input.DownloadFile = input.Key
+ }
+
+ if input.EnableCheckpoint && input.CheckpointFile == "" {
+ input.CheckpointFile = input.DownloadFile + ".downloadfile_record"
+ }
+
+ if input.TaskNum <= 0 {
+ input.TaskNum = 1
+ }
+ if input.PartSize <= 0 {
+ input.PartSize = DEFAULT_PART_SIZE
+ }
+
+ output, err = obsClient.resumeDownload(input, extensions)
+ return
+}
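+
+// Resumable download sketch (illustrative; Bucket/Key are assumed fields on
+// DownloadFileInput, the remaining fields appear in the function above):
+//
+//	down := &DownloadFileInput{}
+//	down.Bucket, down.Key = "my-bucket", "backups/db.tar"
+//	down.DownloadFile = "/tmp/db.tar" // defaults to the object key when empty
+//	down.EnableCheckpoint = true
+//	meta, err := client.DownloadFile(down)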
+
+// SetBucketFetchPolicy sets the bucket fetch policy.
+//
+// You can use this API to set a bucket fetch policy.
+func (obsClient ObsClient) SetBucketFetchPolicy(input *SetBucketFetchPolicyInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+ if input == nil {
+ return nil, errors.New("SetBucketFetchPolicyInput is nil")
+ }
+ if strings.TrimSpace(string(input.Status)) == "" {
+ return nil, errors.New("Fetch policy status is empty")
+ }
+ if strings.TrimSpace(input.Agency) == "" {
+ return nil, errors.New("Fetch policy agency is empty")
+ }
+ output = &BaseModel{}
+ err = obsClient.doActionWithBucketAndKey("SetBucketFetchPolicy", HTTP_PUT, input.Bucket, string(objectKeyExtensionPolicy), input, output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetBucketFetchPolicy gets the bucket fetch policy.
+//
+// You can use this API to obtain the fetch policy of a bucket.
+func (obsClient ObsClient) GetBucketFetchPolicy(input *GetBucketFetchPolicyInput, extensions ...extensionOptions) (output *GetBucketFetchPolicyOutput, err error) {
+ if input == nil {
+ return nil, errors.New("GetBucketFetchPolicyInput is nil")
+ }
+ output = &GetBucketFetchPolicyOutput{}
+ err = obsClient.doActionWithBucketAndKeyV2("GetBucketFetchPolicy", HTTP_GET, input.Bucket, string(objectKeyExtensionPolicy), input, output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// DeleteBucketFetchPolicy deletes the bucket fetch policy.
+//
+// You can use this API to delete the fetch policy of a bucket.
+func (obsClient ObsClient) DeleteBucketFetchPolicy(input *DeleteBucketFetchPolicyInput, extensions ...extensionOptions) (output *BaseModel, err error) {
+ if input == nil {
+ return nil, errors.New("DeleteBucketFetchPolicyInput is nil")
+ }
+ output = &BaseModel{}
+ err = obsClient.doActionWithBucketAndKey("DeleteBucketFetchPolicy", HTTP_DELETE, input.Bucket, string(objectKeyExtensionPolicy), input, output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// SetBucketFetchJob sets the bucket fetch job.
+//
+// You can use this API to set a bucket fetch job.
+func (obsClient ObsClient) SetBucketFetchJob(input *SetBucketFetchJobInput, extensions ...extensionOptions) (output *SetBucketFetchJobOutput, err error) {
+ if input == nil {
+ return nil, errors.New("SetBucketFetchJobInput is nil")
+ }
+ if strings.TrimSpace(input.URL) == "" {
+ return nil, errors.New("URL is empty")
+ }
+ output = &SetBucketFetchJobOutput{}
+ err = obsClient.doActionWithBucketAndKeyV2("SetBucketFetchJob", HTTP_POST, input.Bucket, string(objectKeyAsyncFetchJob), input, output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetBucketFetchJob gets the bucket fetch job.
+//
+// You can use this API to obtain the fetch job of a bucket.
+func (obsClient ObsClient) GetBucketFetchJob(input *GetBucketFetchJobInput, extensions ...extensionOptions) (output *GetBucketFetchJobOutput, err error) {
+ if input == nil {
+ return nil, errors.New("GetBucketFetchJobInput is nil")
+ }
+ if strings.TrimSpace(input.JobID) == "" {
+ return nil, errors.New("JobID is empty")
+ }
+ output = &GetBucketFetchJobOutput{}
+ err = obsClient.doActionWithBucketAndKeyV2("GetBucketFetchJob", HTTP_GET, input.Bucket, string(objectKeyAsyncFetchJob)+"/"+input.JobID, input, output, extensions)
+ if err != nil {
+ output = nil
+ }
+ return
+}
diff --git a/modules/obs/conf.go b/modules/obs/conf.go
new file mode 100755
index 000000000..4b8525bfb
--- /dev/null
+++ b/modules/obs/conf.go
@@ -0,0 +1,471 @@
+// Copyright 2019 Huawei Technologies Co.,Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+//nolint:golint, unused
+package obs
+
+import (
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "net"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type securityProvider struct {
+ ak string
+ sk string
+ securityToken string
+}
+
+type urlHolder struct {
+ scheme string
+ host string
+ port int
+}
+
+type config struct {
+ securityProvider *securityProvider
+ urlHolder *urlHolder
+ pathStyle bool
+ cname bool
+ sslVerify bool
+ endpoint string
+ signature SignatureType
+ region string
+ connectTimeout int
+ socketTimeout int
+ headerTimeout int
+ idleConnTimeout int
+ finalTimeout int
+ maxRetryCount int
+ proxyURL string
+ maxConnsPerHost int
+ pemCerts []byte
+ transport *http.Transport
+ ctx context.Context
+ maxRedirectCount int
+}
+
+func (conf config) String() string {
+ return fmt.Sprintf("[endpoint:%s, signature:%s, pathStyle:%v, region:%s"+
+ "\nconnectTimeout:%d, socketTimeout:%dheaderTimeout:%d, idleConnTimeout:%d"+
+ "\nmaxRetryCount:%d, maxConnsPerHost:%d, sslVerify:%v, maxRedirectCount:%d]",
+ conf.endpoint, conf.signature, conf.pathStyle, conf.region,
+ conf.connectTimeout, conf.socketTimeout, conf.headerTimeout, conf.idleConnTimeout,
+ conf.maxRetryCount, conf.maxConnsPerHost, conf.sslVerify, conf.maxRedirectCount,
+ )
+}
+
+type configurer func(conf *config)
+
+// WithSslVerify is a wrapper for WithSslVerifyAndPemCerts.
+func WithSslVerify(sslVerify bool) configurer {
+ return WithSslVerifyAndPemCerts(sslVerify, nil)
+}
+
+// WithSslVerifyAndPemCerts is a configurer for ObsClient to set conf.sslVerify and conf.pemCerts.
+func WithSslVerifyAndPemCerts(sslVerify bool, pemCerts []byte) configurer {
+ return func(conf *config) {
+ conf.sslVerify = sslVerify
+ conf.pemCerts = pemCerts
+ }
+}
+
+// WithHeaderTimeout is a configurer for ObsClient to set the timeout period for obtaining the response headers.
+func WithHeaderTimeout(headerTimeout int) configurer {
+ return func(conf *config) {
+ conf.headerTimeout = headerTimeout
+ }
+}
+
+// WithProxyUrl is a configurer for ObsClient to set HTTP proxy.
+func WithProxyUrl(proxyURL string) configurer {
+ return func(conf *config) {
+ conf.proxyURL = proxyURL
+ }
+}
+
+// WithMaxConnections is a configurer for ObsClient to set the maximum number of idle HTTP connections.
+func WithMaxConnections(maxConnsPerHost int) configurer {
+ return func(conf *config) {
+ conf.maxConnsPerHost = maxConnsPerHost
+ }
+}
+
+// WithPathStyle is a configurer for ObsClient to set whether path-style URLs (bucket name in the
+// request path) are used instead of virtual-hosted-style URLs (bucket name in the host).
+func WithPathStyle(pathStyle bool) configurer {
+ return func(conf *config) {
+ conf.pathStyle = pathStyle
+ }
+}
+
+// WithSignature is a configurer for ObsClient to set the signature type (v2, v4 or OBS) used to sign requests.
+func WithSignature(signature SignatureType) configurer {
+ return func(conf *config) {
+ conf.signature = signature
+ }
+}
+
+// WithRegion is a configurer for ObsClient to set the region used in v4 signing.
+func WithRegion(region string) configurer {
+ return func(conf *config) {
+ conf.region = region
+ }
+}
+
+// WithConnectTimeout is a configurer for ObsClient to set the timeout period for establishing
+// an http/https connection, in seconds.
+func WithConnectTimeout(connectTimeout int) configurer {
+ return func(conf *config) {
+ conf.connectTimeout = connectTimeout
+ }
+}
+
+// WithSocketTimeout is a configurer for ObsClient to set the timeout duration for transmitting data at
+// the socket layer, in seconds.
+func WithSocketTimeout(socketTimeout int) configurer {
+ return func(conf *config) {
+ conf.socketTimeout = socketTimeout
+ }
+}
+
+// WithIdleConnTimeout is a configurer for ObsClient to set the timeout period of an idle HTTP connection
+// in the connection pool, in seconds.
+func WithIdleConnTimeout(idleConnTimeout int) configurer {
+ return func(conf *config) {
+ conf.idleConnTimeout = idleConnTimeout
+ }
+}
+
+// WithMaxRetryCount is a configurer for ObsClient to set the maximum number of retries for a failed HTTP/HTTPS request.
+func WithMaxRetryCount(maxRetryCount int) configurer {
+ return func(conf *config) {
+ conf.maxRetryCount = maxRetryCount
+ }
+}
+
+// WithSecurityToken is a configurer for ObsClient to set the security token in the temporary access keys.
+func WithSecurityToken(securityToken string) configurer {
+ return func(conf *config) {
+ conf.securityProvider.securityToken = securityToken
+ }
+}
+
+// WithHttpTransport is a configurer for ObsClient to set the customized http Transport.
+func WithHttpTransport(transport *http.Transport) configurer {
+ return func(conf *config) {
+ conf.transport = transport
+ }
+}
+
+// WithRequestContext is a configurer for ObsClient to set the context for each HTTP request.
+func WithRequestContext(ctx context.Context) configurer {
+ return func(conf *config) {
+ conf.ctx = ctx
+ }
+}
+
+// WithCustomDomainName is a configurer for ObsClient to set whether the endpoint is a user-defined
+// domain name (CNAME) bound to a bucket.
+func WithCustomDomainName(cname bool) configurer {
+ return func(conf *config) {
+ conf.cname = cname
+ }
+}
+
+// WithMaxRedirectCount is a configurer for ObsClient to set the maximum number of times that the request is redirected.
+func WithMaxRedirectCount(maxRedirectCount int) configurer {
+ return func(conf *config) {
+ conf.maxRedirectCount = maxRedirectCount
+ }
+}
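+
+// Configurers are applied while constructing the client. A sketch, assuming the package's
+// New constructor (defined outside this file) takes ak/sk/endpoint plus variadic configurers:
+//
+//	client, err := New("ak", "sk", "https://obs.example.com",
+//		WithSignature(SignatureObs),
+//		WithConnectTimeout(10),
+//		WithMaxRetryCount(5),
+//	)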
+
+func (conf *config) prepareConfig() {
+ if conf.connectTimeout <= 0 {
+ conf.connectTimeout = DEFAULT_CONNECT_TIMEOUT
+ }
+
+ if conf.socketTimeout <= 0 {
+ conf.socketTimeout = DEFAULT_SOCKET_TIMEOUT
+ }
+
+ conf.finalTimeout = conf.socketTimeout * 10
+
+ if conf.headerTimeout <= 0 {
+ conf.headerTimeout = DEFAULT_HEADER_TIMEOUT
+ }
+
+ if conf.idleConnTimeout < 0 {
+ conf.idleConnTimeout = DEFAULT_IDLE_CONN_TIMEOUT
+ }
+
+ if conf.maxRetryCount < 0 {
+ conf.maxRetryCount = DEFAULT_MAX_RETRY_COUNT
+ }
+
+ if conf.maxConnsPerHost <= 0 {
+ conf.maxConnsPerHost = DEFAULT_MAX_CONN_PER_HOST
+ }
+
+ if conf.maxRedirectCount < 0 {
+ conf.maxRedirectCount = DEFAULT_MAX_REDIRECT_COUNT
+ }
+}
+
+func (conf *config) initConfigWithDefault() error {
+ conf.securityProvider.ak = strings.TrimSpace(conf.securityProvider.ak)
+ conf.securityProvider.sk = strings.TrimSpace(conf.securityProvider.sk)
+ conf.securityProvider.securityToken = strings.TrimSpace(conf.securityProvider.securityToken)
+ conf.endpoint = strings.TrimSpace(conf.endpoint)
+ if conf.endpoint == "" {
+ return errors.New("endpoint is not set")
+ }
+
+ if index := strings.Index(conf.endpoint, "?"); index > 0 {
+ conf.endpoint = conf.endpoint[:index]
+ }
+
+ conf.endpoint = strings.TrimRight(conf.endpoint, "/")
+
+ if conf.signature == "" {
+ conf.signature = DEFAULT_SIGNATURE
+ }
+
+ urlHolder := &urlHolder{}
+ var address string
+ if strings.HasPrefix(conf.endpoint, "https://") {
+ urlHolder.scheme = "https"
+ address = conf.endpoint[len("https://"):]
+ } else if strings.HasPrefix(conf.endpoint, "http://") {
+ urlHolder.scheme = "http"
+ address = conf.endpoint[len("http://"):]
+ } else {
+ urlHolder.scheme = "https"
+ address = conf.endpoint
+ }
+
+ addr := strings.Split(address, ":")
+ if len(addr) == 2 {
+ if port, err := strconv.Atoi(addr[1]); err == nil {
+ urlHolder.port = port
+ }
+ }
+ urlHolder.host = addr[0]
+ if urlHolder.port == 0 {
+ if urlHolder.scheme == "https" {
+ urlHolder.port = 443
+ } else {
+ urlHolder.port = 80
+ }
+ }
+
+ if IsIP(urlHolder.host) {
+ conf.pathStyle = true
+ }
+
+ conf.urlHolder = urlHolder
+
+ conf.region = strings.TrimSpace(conf.region)
+ if conf.region == "" {
+ conf.region = DEFAULT_REGION
+ }
+
+ conf.prepareConfig()
+ conf.proxyURL = strings.TrimSpace(conf.proxyURL)
+ return nil
+}
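+
+// For illustration, the normalization above maps endpoints as follows (the scheme defaults
+// to https, the port to 443/80, and an IP host forces path-style addressing):
+//
+//	"obs.example.com"             -> scheme https, host obs.example.com, port 443
+//	"http://obs.example.com:80//" -> scheme http, host obs.example.com, port 80
+//	"https://192.168.0.1:8443"    -> scheme https, host 192.168.0.1, port 8443, pathStyle=true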
+
+func (conf *config) getTransport() error {
+ if conf.transport == nil {
+ conf.transport = &http.Transport{
+ Dial: func(network, addr string) (net.Conn, error) {
+ conn, err := net.DialTimeout(network, addr, time.Second*time.Duration(conf.connectTimeout))
+ if err != nil {
+ return nil, err
+ }
+ return getConnDelegate(conn, conf.socketTimeout, conf.finalTimeout), nil
+ },
+ MaxIdleConns: conf.maxConnsPerHost,
+ MaxIdleConnsPerHost: conf.maxConnsPerHost,
+ ResponseHeaderTimeout: time.Second * time.Duration(conf.headerTimeout),
+ IdleConnTimeout: time.Second * time.Duration(conf.idleConnTimeout),
+ }
+
+ if conf.proxyURL != "" {
+ proxyURL, err := url.Parse(conf.proxyURL)
+ if err != nil {
+ return err
+ }
+ conf.transport.Proxy = http.ProxyURL(proxyURL)
+ }
+
+ tlsConfig := &tls.Config{InsecureSkipVerify: !conf.sslVerify}
+ if conf.sslVerify && conf.pemCerts != nil {
+ pool := x509.NewCertPool()
+ pool.AppendCertsFromPEM(conf.pemCerts)
+ tlsConfig.RootCAs = pool
+ }
+
+ conf.transport.TLSClientConfig = tlsConfig
+ conf.transport.DisableCompression = true
+ }
+
+ return nil
+}
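+
+// Callers needing full control can bypass the default transport entirely, e.g. (sketch
+// using only net/http and time from the imports above; New is assumed as described earlier):
+//
+//	transport := &http.Transport{MaxIdleConnsPerHost: 50, TLSHandshakeTimeout: 10 * time.Second}
+//	client, err := New("ak", "sk", "https://obs.example.com", WithHttpTransport(transport))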
+
+func checkRedirectFunc(req *http.Request, via []*http.Request) error {
+ return http.ErrUseLastResponse
+}
+
+// DummyQueryEscape returns the input string unchanged.
+func DummyQueryEscape(s string) string {
+ return s
+}
+
+func (conf *config) prepareBaseURL(bucketName string) (requestURL string, canonicalizedURL string) {
+ urlHolder := conf.urlHolder
+ if conf.cname {
+ requestURL = fmt.Sprintf("%s://%s:%d", urlHolder.scheme, urlHolder.host, urlHolder.port)
+ if conf.signature == "v4" {
+ canonicalizedURL = "/"
+ } else {
+ canonicalizedURL = "/" + urlHolder.host + "/"
+ }
+ } else {
+ if bucketName == "" {
+ requestURL = fmt.Sprintf("%s://%s:%d", urlHolder.scheme, urlHolder.host, urlHolder.port)
+ canonicalizedURL = "/"
+ } else {
+ if conf.pathStyle {
+ requestURL = fmt.Sprintf("%s://%s:%d/%s", urlHolder.scheme, urlHolder.host, urlHolder.port, bucketName)
+ canonicalizedURL = "/" + bucketName
+ } else {
+ requestURL = fmt.Sprintf("%s://%s.%s:%d", urlHolder.scheme, bucketName, urlHolder.host, urlHolder.port)
+ if conf.signature == "v2" || conf.signature == "OBS" {
+ canonicalizedURL = "/" + bucketName + "/"
+ } else {
+ canonicalizedURL = "/"
+ }
+ }
+ }
+ }
+ return
+}
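+
+// The resulting URL shapes for bucket "b" on host "obs.example.com" over https (illustrative):
+//
+//	virtual-hosted style: https://b.obs.example.com:443   canonicalized "/b/" (v2/OBS) or "/" (v4)
+//	path style:           https://obs.example.com:443/b   canonicalized "/b"
+//	cname:                https://obs.example.com:443     canonicalized "/obs.example.com/" (v2/OBS) or "/" (v4)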
+
+func (conf *config) prepareObjectKey(escape bool, objectKey string, escapeFunc func(s string) string) (encodeObjectKey string) {
+ if escape {
+ tempKey := []rune(objectKey)
+ result := make([]string, 0, len(tempKey))
+ for _, value := range tempKey {
+ if string(value) == "/" {
+ result = append(result, string(value))
+ } else {
+ if string(value) == " " {
+ result = append(result, url.PathEscape(string(value)))
+ } else {
+ result = append(result, url.QueryEscape(string(value)))
+ }
+ }
+ }
+ encodeObjectKey = strings.Join(result, "")
+ } else {
+ encodeObjectKey = escapeFunc(objectKey)
+ }
+ return
+}
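+
+// Example of the escape branch above: '/' is kept as the key delimiter, a space uses
+// url.PathEscape ("%20") because url.QueryEscape would turn it into "+", and every other
+// rune goes through url.QueryEscape:
+//
+//	conf.prepareObjectKey(true, "dir/a b+c", nil) // -> "dir/a%20b%2Bc"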
+
+func (conf *config) prepareEscapeFunc(escape bool) (escapeFunc func(s string) string) {
+ if escape {
+ return url.QueryEscape
+ }
+ return DummyQueryEscape
+}
+
+func (conf *config) formatUrls(bucketName, objectKey string, params map[string]string, escape bool) (requestURL string, canonicalizedURL string) {
+ requestURL, canonicalizedURL = conf.prepareBaseURL(bucketName)
+ escapeFunc := conf.prepareEscapeFunc(escape)
+
+ if objectKey != "" {
+ encodeObjectKey := conf.prepareObjectKey(escape, objectKey, escapeFunc)
+ requestURL += "/" + encodeObjectKey
+ if !strings.HasSuffix(canonicalizedURL, "/") {
+ canonicalizedURL += "/"
+ }
+ canonicalizedURL += encodeObjectKey
+ }
+
+ keys := make([]string, 0, len(params))
+ for key := range params {
+ keys = append(keys, strings.TrimSpace(key))
+ }
+ sort.Strings(keys)
+ i := 0
+
+ for index, key := range keys {
+ if index == 0 {
+ requestURL += "?"
+ } else {
+ requestURL += "&"
+ }
+ _key := url.QueryEscape(key)
+ requestURL += _key
+
+ _value := params[key]
+ if conf.signature == "v4" {
+ requestURL += "=" + url.QueryEscape(_value)
+ } else {
+ if _value != "" {
+ requestURL += "=" + url.QueryEscape(_value)
+ _value = "=" + _value
+ } else {
+ _value = ""
+ }
+ lowerKey := strings.ToLower(key)
+ _, ok := allowedResourceParameterNames[lowerKey]
+ prefixHeader := HEADER_PREFIX
+ isObs := conf.signature == SignatureObs
+ if isObs {
+ prefixHeader = HEADER_PREFIX_OBS
+ }
+ ok = ok || strings.HasPrefix(lowerKey, prefixHeader)
+ if ok {
+ if i == 0 {
+ canonicalizedURL += "?"
+ } else {
+ canonicalizedURL += "&"
+ }
+ canonicalizedURL += getQueryURL(_key, _value)
+ i++
+ }
+ }
+ }
+ return
+}
+
+func getQueryURL(key, value string) string {
+ return key + value
+}
diff --git a/modules/obs/const.go b/modules/obs/const.go
new file mode 100755
index 000000000..89f1e08eb
--- /dev/null
+++ b/modules/obs/const.go
@@ -0,0 +1,932 @@
+// Copyright 2019 Huawei Technologies Co.,Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+//nolint:golint, unused
+package obs
+
+const (
+ obsSdkVersion = "3.20.9"
+ USER_AGENT = "obs-sdk-go/" + obsSdkVersion
+ HEADER_PREFIX = "x-amz-"
+ HEADER_PREFIX_META = "x-amz-meta-"
+ HEADER_PREFIX_OBS = "x-obs-"
+ HEADER_PREFIX_META_OBS = "x-obs-meta-"
+ HEADER_DATE_AMZ = "x-amz-date"
+ HEADER_DATE_OBS = "x-obs-date"
+ HEADER_STS_TOKEN_AMZ = "x-amz-security-token"
+ HEADER_STS_TOKEN_OBS = "x-obs-security-token"
+ HEADER_ACCESSS_KEY_AMZ = "AWSAccessKeyId"
+ PREFIX_META = "meta-"
+
+ HEADER_CONTENT_SHA256_AMZ = "x-amz-content-sha256"
+ HEADER_ACL_AMZ = "x-amz-acl"
+ HEADER_ACL_OBS = "x-obs-acl"
+ HEADER_ACL = "acl"
+ HEADER_LOCATION_AMZ = "location"
+ HEADER_BUCKET_LOCATION_OBS = "bucket-location"
+ HEADER_COPY_SOURCE = "copy-source"
+ HEADER_COPY_SOURCE_RANGE = "copy-source-range"
+ HEADER_RANGE = "Range"
+ HEADER_STORAGE_CLASS = "x-default-storage-class"
+ HEADER_STORAGE_CLASS_OBS = "x-obs-storage-class"
+ HEADER_VERSION_OBS = "version"
+ HEADER_GRANT_READ_OBS = "grant-read"
+ HEADER_GRANT_WRITE_OBS = "grant-write"
+ HEADER_GRANT_READ_ACP_OBS = "grant-read-acp"
+ HEADER_GRANT_WRITE_ACP_OBS = "grant-write-acp"
+ HEADER_GRANT_FULL_CONTROL_OBS = "grant-full-control"
+ HEADER_GRANT_READ_DELIVERED_OBS = "grant-read-delivered"
+ HEADER_GRANT_FULL_CONTROL_DELIVERED_OBS = "grant-full-control-delivered"
+ HEADER_REQUEST_ID = "request-id"
+ HEADER_BUCKET_REGION = "bucket-region"
+ HEADER_ACCESS_CONRTOL_ALLOW_ORIGIN = "access-control-allow-origin"
+ HEADER_ACCESS_CONRTOL_ALLOW_HEADERS = "access-control-allow-headers"
+ HEADER_ACCESS_CONRTOL_MAX_AGE = "access-control-max-age"
+ HEADER_ACCESS_CONRTOL_ALLOW_METHODS = "access-control-allow-methods"
+ HEADER_ACCESS_CONRTOL_EXPOSE_HEADERS = "access-control-expose-headers"
+ HEADER_EPID_HEADERS = "epid"
+ HEADER_VERSION_ID = "version-id"
+ HEADER_COPY_SOURCE_VERSION_ID = "copy-source-version-id"
+ HEADER_DELETE_MARKER = "delete-marker"
+ HEADER_WEBSITE_REDIRECT_LOCATION = "website-redirect-location"
+ HEADER_METADATA_DIRECTIVE = "metadata-directive"
+ HEADER_EXPIRATION = "expiration"
+ HEADER_EXPIRES_OBS = "x-obs-expires"
+ HEADER_RESTORE = "restore"
+ HEADER_OBJECT_TYPE = "object-type"
+ HEADER_NEXT_APPEND_POSITION = "next-append-position"
+ HEADER_STORAGE_CLASS2 = "storage-class"
+ HEADER_CONTENT_LENGTH = "content-length"
+ HEADER_CONTENT_TYPE = "content-type"
+ HEADER_CONTENT_LANGUAGE = "content-language"
+ HEADER_EXPIRES = "expires"
+ HEADER_CACHE_CONTROL = "cache-control"
+ HEADER_CONTENT_DISPOSITION = "content-disposition"
+ HEADER_CONTENT_ENCODING = "content-encoding"
+ HEADER_AZ_REDUNDANCY = "az-redundancy"
+ headerOefMarker = "oef-marker"
+
+ HEADER_ETAG = "etag"
+ HEADER_LASTMODIFIED = "last-modified"
+
+ HEADER_COPY_SOURCE_IF_MATCH = "copy-source-if-match"
+ HEADER_COPY_SOURCE_IF_NONE_MATCH = "copy-source-if-none-match"
+ HEADER_COPY_SOURCE_IF_MODIFIED_SINCE = "copy-source-if-modified-since"
+ HEADER_COPY_SOURCE_IF_UNMODIFIED_SINCE = "copy-source-if-unmodified-since"
+
+ HEADER_IF_MATCH = "If-Match"
+ HEADER_IF_NONE_MATCH = "If-None-Match"
+ HEADER_IF_MODIFIED_SINCE = "If-Modified-Since"
+ HEADER_IF_UNMODIFIED_SINCE = "If-Unmodified-Since"
+
+ HEADER_SSEC_ENCRYPTION = "server-side-encryption-customer-algorithm"
+ HEADER_SSEC_KEY = "server-side-encryption-customer-key"
+ HEADER_SSEC_KEY_MD5 = "server-side-encryption-customer-key-MD5"
+
+ HEADER_SSEKMS_ENCRYPTION = "server-side-encryption"
+ HEADER_SSEKMS_KEY = "server-side-encryption-aws-kms-key-id"
+ HEADER_SSEKMS_ENCRYPT_KEY_OBS = "server-side-encryption-kms-key-id"
+
+ HEADER_SSEC_COPY_SOURCE_ENCRYPTION = "copy-source-server-side-encryption-customer-algorithm"
+ HEADER_SSEC_COPY_SOURCE_KEY = "copy-source-server-side-encryption-customer-key"
+ HEADER_SSEC_COPY_SOURCE_KEY_MD5 = "copy-source-server-side-encryption-customer-key-MD5"
+
+ HEADER_SSEKMS_KEY_AMZ = "x-amz-server-side-encryption-aws-kms-key-id"
+
+ HEADER_SSEKMS_KEY_OBS = "x-obs-server-side-encryption-kms-key-id"
+
+ HEADER_SUCCESS_ACTION_REDIRECT = "success_action_redirect"
+
+ HEADER_DATE_CAMEL = "Date"
+ HEADER_HOST_CAMEL = "Host"
+ HEADER_HOST = "host"
+ HEADER_AUTH_CAMEL = "Authorization"
+ HEADER_MD5_CAMEL = "Content-MD5"
+ HEADER_LOCATION_CAMEL = "Location"
+ HEADER_CONTENT_LENGTH_CAMEL = "Content-Length"
+ HEADER_CONTENT_TYPE_CAML = "Content-Type"
+ HEADER_USER_AGENT_CAMEL = "User-Agent"
+ HEADER_ORIGIN_CAMEL = "Origin"
+ HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL = "Access-Control-Request-Headers"
+ HEADER_CACHE_CONTROL_CAMEL = "Cache-Control"
+ HEADER_CONTENT_DISPOSITION_CAMEL = "Content-Disposition"
+ HEADER_CONTENT_ENCODING_CAMEL = "Content-Encoding"
+ HEADER_CONTENT_LANGUAGE_CAMEL = "Content-Language"
+ HEADER_EXPIRES_CAMEL = "Expires"
+
+ PARAM_VERSION_ID = "versionId"
+ PARAM_RESPONSE_CONTENT_TYPE = "response-content-type"
+ PARAM_RESPONSE_CONTENT_LANGUAGE = "response-content-language"
+ PARAM_RESPONSE_EXPIRES = "response-expires"
+ PARAM_RESPONSE_CACHE_CONTROL = "response-cache-control"
+ PARAM_RESPONSE_CONTENT_DISPOSITION = "response-content-disposition"
+ PARAM_RESPONSE_CONTENT_ENCODING = "response-content-encoding"
+ PARAM_IMAGE_PROCESS = "x-image-process"
+
+ PARAM_ALGORITHM_AMZ_CAMEL = "X-Amz-Algorithm"
+ PARAM_CREDENTIAL_AMZ_CAMEL = "X-Amz-Credential"
+ PARAM_DATE_AMZ_CAMEL = "X-Amz-Date"
+ PARAM_DATE_OBS_CAMEL = "X-Obs-Date"
+ PARAM_EXPIRES_AMZ_CAMEL = "X-Amz-Expires"
+ PARAM_SIGNEDHEADERS_AMZ_CAMEL = "X-Amz-SignedHeaders"
+ PARAM_SIGNATURE_AMZ_CAMEL = "X-Amz-Signature"
+
+ DEFAULT_SIGNATURE = SignatureV2
+ DEFAULT_REGION = "region"
+ DEFAULT_CONNECT_TIMEOUT = 60
+ DEFAULT_SOCKET_TIMEOUT = 60
+ DEFAULT_HEADER_TIMEOUT = 60
+ DEFAULT_IDLE_CONN_TIMEOUT = 30
+ DEFAULT_MAX_RETRY_COUNT = 3
+ DEFAULT_MAX_REDIRECT_COUNT = 3
+ DEFAULT_MAX_CONN_PER_HOST = 1000
+ EMPTY_CONTENT_SHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ UNSIGNED_PAYLOAD = "UNSIGNED-PAYLOAD"
+ LONG_DATE_FORMAT = "20060102T150405Z"
+ SHORT_DATE_FORMAT = "20060102"
+ ISO8601_DATE_FORMAT = "2006-01-02T15:04:05Z"
+ ISO8601_MIDNIGHT_DATE_FORMAT = "2006-01-02T00:00:00Z"
+ RFC1123_FORMAT = "Mon, 02 Jan 2006 15:04:05 GMT"
+
+ V4_SERVICE_NAME = "s3"
+ V4_SERVICE_SUFFIX = "aws4_request"
+
+ V2_HASH_PREFIX = "AWS"
+ OBS_HASH_PREFIX = "OBS"
+
+ V4_HASH_PREFIX = "AWS4-HMAC-SHA256"
+ V4_HASH_PRE = "AWS4"
+
+ DEFAULT_SSE_KMS_ENCRYPTION = "aws:kms"
+ DEFAULT_SSE_KMS_ENCRYPTION_OBS = "kms"
+
+ DEFAULT_SSE_C_ENCRYPTION = "AES256"
+
+ HTTP_GET = "GET"
+ HTTP_POST = "POST"
+ HTTP_PUT = "PUT"
+ HTTP_DELETE = "DELETE"
+ HTTP_HEAD = "HEAD"
+ HTTP_OPTIONS = "OPTIONS"
+
+ REQUEST_PAYER = "request-payer"
+ MULTI_AZ = "3az"
+
+ MAX_PART_SIZE = 5 * 1024 * 1024 * 1024
+ MIN_PART_SIZE = 100 * 1024
+ DEFAULT_PART_SIZE = 9 * 1024 * 1024
+ MAX_PART_NUM = 10000
+)
+
+// SignatureType defines type of signature
+type SignatureType string
+
+const (
+ // SignatureV2 signature type v2
+ SignatureV2 SignatureType = "v2"
+ // SignatureV4 signature type v4
+ SignatureV4 SignatureType = "v4"
+ // SignatureObs signature type OBS
+ SignatureObs SignatureType = "OBS"
+)
+
+var (
+ interestedHeaders = []string{"content-md5", "content-type", "date"}
+
+ allowedRequestHTTPHeaderMetadataNames = map[string]bool{
+ "content-type": true,
+ "content-md5": true,
+ "content-length": true,
+ "content-language": true,
+ "expires": true,
+ "origin": true,
+ "cache-control": true,
+ "content-disposition": true,
+ "content-encoding": true,
+ "access-control-request-method": true,
+ "access-control-request-headers": true,
+ "x-default-storage-class": true,
+ "location": true,
+ "date": true,
+ "etag": true,
+ "range": true,
+ "host": true,
+ "if-modified-since": true,
+ "if-unmodified-since": true,
+ "if-match": true,
+ "if-none-match": true,
+ "last-modified": true,
+ "content-range": true,
+ }
+
+ allowedResourceParameterNames = map[string]bool{
+ "acl": true,
+ "backtosource": true,
+ "metadata": true,
+ "policy": true,
+ "torrent": true,
+ "logging": true,
+ "location": true,
+ "storageinfo": true,
+ "quota": true,
+ "storageclass": true,
+ "storagepolicy": true,
+ "requestpayment": true,
+ "versions": true,
+ "versioning": true,
+ "versionid": true,
+ "uploads": true,
+ "uploadid": true,
+ "partnumber": true,
+ "website": true,
+ "notification": true,
+ "lifecycle": true,
+ "deletebucket": true,
+ "delete": true,
+ "cors": true,
+ "restore": true,
+ "tagging": true,
+ "append": true,
+ "position": true,
+ "replication": true,
+ "response-content-type": true,
+ "response-content-language": true,
+ "response-expires": true,
+ "response-cache-control": true,
+ "response-content-disposition": true,
+ "response-content-encoding": true,
+ "x-image-process": true,
+ "x-oss-process": true,
+ "x-image-save-bucket": true,
+ "x-image-save-object": true,
+ "ignore-sign-in-query": true,
+ }
+
+ mimeTypes = map[string]string{
+ "001": "application/x-001",
+ "301": "application/x-301",
+ "323": "text/h323",
+ "7z": "application/x-7z-compressed",
+ "906": "application/x-906",
+ "907": "drawing/907",
+ "IVF": "video/x-ivf",
+ "a11": "application/x-a11",
+ "aac": "audio/x-aac",
+ "acp": "audio/x-mei-aac",
+ "ai": "application/postscript",
+ "aif": "audio/aiff",
+ "aifc": "audio/aiff",
+ "aiff": "audio/aiff",
+ "anv": "application/x-anv",
+ "apk": "application/vnd.android.package-archive",
+ "asa": "text/asa",
+ "asf": "video/x-ms-asf",
+ "asp": "text/asp",
+ "asx": "video/x-ms-asf",
+ "atom": "application/atom+xml",
+ "au": "audio/basic",
+ "avi": "video/avi",
+ "awf": "application/vnd.adobe.workflow",
+ "biz": "text/xml",
+ "bmp": "application/x-bmp",
+ "bot": "application/x-bot",
+ "bz2": "application/x-bzip2",
+ "c4t": "application/x-c4t",
+ "c90": "application/x-c90",
+ "cal": "application/x-cals",
+ "cat": "application/vnd.ms-pki.seccat",
+ "cdf": "application/x-netcdf",
+ "cdr": "application/x-cdr",
+ "cel": "application/x-cel",
+ "cer": "application/x-x509-ca-cert",
+ "cg4": "application/x-g4",
+ "cgm": "application/x-cgm",
+ "cit": "application/x-cit",
+ "class": "java/*",
+ "cml": "text/xml",
+ "cmp": "application/x-cmp",
+ "cmx": "application/x-cmx",
+ "cot": "application/x-cot",
+ "crl": "application/pkix-crl",
+ "crt": "application/x-x509-ca-cert",
+ "csi": "application/x-csi",
+ "css": "text/css",
+ "csv": "text/csv",
+ "cu": "application/cu-seeme",
+ "cut": "application/x-cut",
+ "dbf": "application/x-dbf",
+ "dbm": "application/x-dbm",
+ "dbx": "application/x-dbx",
+ "dcd": "text/xml",
+ "dcx": "application/x-dcx",
+ "deb": "application/x-debian-package",
+ "der": "application/x-x509-ca-cert",
+ "dgn": "application/x-dgn",
+ "dib": "application/x-dib",
+ "dll": "application/x-msdownload",
+ "doc": "application/msword",
+ "docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+ "dot": "application/msword",
+ "drw": "application/x-drw",
+ "dtd": "text/xml",
+ "dvi": "application/x-dvi",
+ "dwf": "application/x-dwf",
+ "dwg": "application/x-dwg",
+ "dxb": "application/x-dxb",
+ "dxf": "application/x-dxf",
+ "edn": "application/vnd.adobe.edn",
+ "emf": "application/x-emf",
+ "eml": "message/rfc822",
+ "ent": "text/xml",
+ "eot": "application/vnd.ms-fontobject",
+ "epi": "application/x-epi",
+ "eps": "application/postscript",
+ "epub": "application/epub+zip",
+ "etd": "application/x-ebx",
+ "etx": "text/x-setext",
+ "exe": "application/x-msdownload",
+ "fax": "image/fax",
+ "fdf": "application/vnd.fdf",
+ "fif": "application/fractals",
+ "flac": "audio/flac",
+ "flv": "video/x-flv",
+ "fo": "text/xml",
+ "frm": "application/x-frm",
+ "g4": "application/x-g4",
+ "gbr": "application/x-gbr",
+ "gif": "image/gif",
+ "gl2": "application/x-gl2",
+ "gp4": "application/x-gp4",
+ "gz": "application/gzip",
+ "hgl": "application/x-hgl",
+ "hmr": "application/x-hmr",
+ "hpg": "application/x-hpgl",
+ "hpl": "application/x-hpl",
+ "hqx": "application/mac-binhex40",
+ "hrf": "application/x-hrf",
+ "hta": "application/hta",
+ "htc": "text/x-component",
+ "htm": "text/html",
+ "html": "text/html",
+ "htt": "text/webviewhtml",
+ "htx": "text/html",
+ "icb": "application/x-icb",
+ "ico": "application/x-ico",
+ "ics": "text/calendar",
+ "iff": "application/x-iff",
+ "ig4": "application/x-g4",
+ "igs": "application/x-igs",
+ "iii": "application/x-iphone",
+ "img": "application/x-img",
+ "ini": "text/plain",
+ "ins": "application/x-internet-signup",
+ "ipa": "application/vnd.iphone",
+ "iso": "application/x-iso9660-image",
+ "isp": "application/x-internet-signup",
+ "jar": "application/java-archive",
+ "java": "java/*",
+ "jfif": "image/jpeg",
+ "jpe": "image/jpeg",
+ "jpeg": "image/jpeg",
+ "jpg": "image/jpeg",
+ "js": "application/x-javascript",
+ "json": "application/json",
+ "jsp": "text/html",
+ "la1": "audio/x-liquid-file",
+ "lar": "application/x-laplayer-reg",
+ "latex": "application/x-latex",
+ "lavs": "audio/x-liquid-secure",
+ "lbm": "application/x-lbm",
+ "lmsff": "audio/x-la-lms",
+ "log": "text/plain",
+ "ls": "application/x-javascript",
+ "ltr": "application/x-ltr",
+ "m1v": "video/x-mpeg",
+ "m2v": "video/x-mpeg",
+ "m3u": "audio/mpegurl",
+ "m4a": "audio/mp4",
+ "m4e": "video/mpeg4",
+ "m4v": "video/mp4",
+ "mac": "application/x-mac",
+ "man": "application/x-troff-man",
+ "math": "text/xml",
+ "mdb": "application/msaccess",
+ "mfp": "application/x-shockwave-flash",
+ "mht": "message/rfc822",
+ "mhtml": "message/rfc822",
+ "mi": "application/x-mi",
+ "mid": "audio/mid",
+ "midi": "audio/mid",
+ "mil": "application/x-mil",
+ "mml": "text/xml",
+ "mnd": "audio/x-musicnet-download",
+ "mns": "audio/x-musicnet-stream",
+ "mocha": "application/x-javascript",
+ "mov": "video/quicktime",
+ "movie": "video/x-sgi-movie",
+ "mp1": "audio/mp1",
+ "mp2": "audio/mp2",
+ "mp2v": "video/mpeg",
+ "mp3": "audio/mp3",
+ "mp4": "video/mp4",
+ "mp4a": "audio/mp4",
+ "mp4v": "video/mp4",
+ "mpa": "video/x-mpg",
+ "mpd": "application/vnd.ms-project",
+ "mpe": "video/mpeg",
+ "mpeg": "video/mpeg",
+ "mpg": "video/mpeg",
+ "mpg4": "video/mp4",
+ "mpga": "audio/rn-mpeg",
+ "mpp": "application/vnd.ms-project",
+ "mps": "video/x-mpeg",
+ "mpt": "application/vnd.ms-project",
+ "mpv": "video/mpg",
+ "mpv2": "video/mpeg",
+ "mpw": "application/vnd.ms-project",
+ "mpx": "application/vnd.ms-project",
+ "mtx": "text/xml",
+ "mxp": "application/x-mmxp",
+ "net": "image/pnetvue",
+ "nrf": "application/x-nrf",
+ "nws": "message/rfc822",
+ "odc": "text/x-ms-odc",
+ "oga": "audio/ogg",
+ "ogg": "audio/ogg",
+ "ogv": "video/ogg",
+ "ogx": "application/ogg",
+ "out": "application/x-out",
+ "p10": "application/pkcs10",
+ "p12": "application/x-pkcs12",
+ "p7b": "application/x-pkcs7-certificates",
+ "p7c": "application/pkcs7-mime",
+ "p7m": "application/pkcs7-mime",
+ "p7r": "application/x-pkcs7-certreqresp",
+ "p7s": "application/pkcs7-signature",
+ "pbm": "image/x-portable-bitmap",
+ "pc5": "application/x-pc5",
+ "pci": "application/x-pci",
+ "pcl": "application/x-pcl",
+ "pcx": "application/x-pcx",
+ "pdf": "application/pdf",
+ "pdx": "application/vnd.adobe.pdx",
+ "pfx": "application/x-pkcs12",
+ "pgl": "application/x-pgl",
+ "pgm": "image/x-portable-graymap",
+ "pic": "application/x-pic",
+ "pko": "application/vnd.ms-pki.pko",
+ "pl": "application/x-perl",
+ "plg": "text/html",
+ "pls": "audio/scpls",
+ "plt": "application/x-plt",
+ "png": "image/png",
+ "pnm": "image/x-portable-anymap",
+ "pot": "application/vnd.ms-powerpoint",
+ "ppa": "application/vnd.ms-powerpoint",
+ "ppm": "application/x-ppm",
+ "pps": "application/vnd.ms-powerpoint",
+ "ppt": "application/vnd.ms-powerpoint",
+ "pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
+ "pr": "application/x-pr",
+ "prf": "application/pics-rules",
+ "prn": "application/x-prn",
+ "prt": "application/x-prt",
+ "ps": "application/postscript",
+ "ptn": "application/x-ptn",
+ "pwz": "application/vnd.ms-powerpoint",
+ "qt": "video/quicktime",
+ "r3t": "text/vnd.rn-realtext3d",
+ "ra": "audio/vnd.rn-realaudio",
+ "ram": "audio/x-pn-realaudio",
+ "rar": "application/x-rar-compressed",
+ "ras": "application/x-ras",
+ "rat": "application/rat-file",
+ "rdf": "text/xml",
+ "rec": "application/vnd.rn-recording",
+ "red": "application/x-red",
+ "rgb": "application/x-rgb",
+ "rjs": "application/vnd.rn-realsystem-rjs",
+ "rjt": "application/vnd.rn-realsystem-rjt",
+ "rlc": "application/x-rlc",
+ "rle": "application/x-rle",
+ "rm": "application/vnd.rn-realmedia",
+ "rmf": "application/vnd.adobe.rmf",
+ "rmi": "audio/mid",
+ "rmj": "application/vnd.rn-realsystem-rmj",
+ "rmm": "audio/x-pn-realaudio",
+ "rmp": "application/vnd.rn-rn_music_package",
+ "rms": "application/vnd.rn-realmedia-secure",
+ "rmvb": "application/vnd.rn-realmedia-vbr",
+ "rmx": "application/vnd.rn-realsystem-rmx",
+ "rnx": "application/vnd.rn-realplayer",
+ "rp": "image/vnd.rn-realpix",
+ "rpm": "audio/x-pn-realaudio-plugin",
+ "rsml": "application/vnd.rn-rsml",
+ "rss": "application/rss+xml",
+ "rt": "text/vnd.rn-realtext",
+ "rtf": "application/x-rtf",
+ "rv": "video/vnd.rn-realvideo",
+ "sam": "application/x-sam",
+ "sat": "application/x-sat",
+ "sdp": "application/sdp",
+ "sdw": "application/x-sdw",
+ "sgm": "text/sgml",
+ "sgml": "text/sgml",
+ "sis": "application/vnd.symbian.install",
+ "sisx": "application/vnd.symbian.install",
+ "sit": "application/x-stuffit",
+ "slb": "application/x-slb",
+ "sld": "application/x-sld",
+ "slk": "drawing/x-slk",
+ "smi": "application/smil",
+ "smil": "application/smil",
+ "smk": "application/x-smk",
+ "snd": "audio/basic",
+ "sol": "text/plain",
+ "sor": "text/plain",
+ "spc": "application/x-pkcs7-certificates",
+ "spl": "application/futuresplash",
+ "spp": "text/xml",
+ "ssm": "application/streamingmedia",
+ "sst": "application/vnd.ms-pki.certstore",
+ "stl": "application/vnd.ms-pki.stl",
+ "stm": "text/html",
+ "sty": "application/x-sty",
+ "svg": "image/svg+xml",
+ "swf": "application/x-shockwave-flash",
+ "tar": "application/x-tar",
+ "tdf": "application/x-tdf",
+ "tg4": "application/x-tg4",
+ "tga": "application/x-tga",
+ "tif": "image/tiff",
+ "tiff": "image/tiff",
+ "tld": "text/xml",
+ "top": "drawing/x-top",
+ "torrent": "application/x-bittorrent",
+ "tsd": "text/xml",
+ "ttf": "application/x-font-ttf",
+ "txt": "text/plain",
+ "uin": "application/x-icq",
+ "uls": "text/iuls",
+ "vcf": "text/x-vcard",
+ "vda": "application/x-vda",
+ "vdx": "application/vnd.visio",
+ "vml": "text/xml",
+ "vpg": "application/x-vpeg005",
+ "vsd": "application/vnd.visio",
+ "vss": "application/vnd.visio",
+ "vst": "application/x-vst",
+ "vsw": "application/vnd.visio",
+ "vsx": "application/vnd.visio",
+ "vtx": "application/vnd.visio",
+ "vxml": "text/xml",
+ "wav": "audio/wav",
+ "wax": "audio/x-ms-wax",
+ "wb1": "application/x-wb1",
+ "wb2": "application/x-wb2",
+ "wb3": "application/x-wb3",
+ "wbmp": "image/vnd.wap.wbmp",
+ "webm": "video/webm",
+ "wiz": "application/msword",
+ "wk3": "application/x-wk3",
+ "wk4": "application/x-wk4",
+ "wkq": "application/x-wkq",
+ "wks": "application/x-wks",
+ "wm": "video/x-ms-wm",
+ "wma": "audio/x-ms-wma",
+ "wmd": "application/x-ms-wmd",
+ "wmf": "application/x-wmf",
+ "wml": "text/vnd.wap.wml",
+ "wmv": "video/x-ms-wmv",
+ "wmx": "video/x-ms-wmx",
+ "wmz": "application/x-ms-wmz",
+ "woff": "application/x-font-woff",
+ "wp6": "application/x-wp6",
+ "wpd": "application/x-wpd",
+ "wpg": "application/x-wpg",
+ "wpl": "application/vnd.ms-wpl",
+ "wq1": "application/x-wq1",
+ "wr1": "application/x-wr1",
+ "wri": "application/x-wri",
+ "wrk": "application/x-wrk",
+ "ws": "application/x-ws",
+ "ws2": "application/x-ws",
+ "wsc": "text/scriptlet",
+ "wsdl": "text/xml",
+ "wvx": "video/x-ms-wvx",
+ "x_b": "application/x-x_b",
+ "x_t": "application/x-x_t",
+ "xap": "application/x-silverlight-app",
+ "xbm": "image/x-xbitmap",
+ "xdp": "application/vnd.adobe.xdp",
+ "xdr": "text/xml",
+ "xfd": "application/vnd.adobe.xfd",
+ "xfdf": "application/vnd.adobe.xfdf",
+ "xhtml": "text/html",
+ "xls": "application/vnd.ms-excel",
+ "xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
+ "xlw": "application/x-xlw",
+ "xml": "text/xml",
+ "xpl": "audio/scpls",
+ "xpm": "image/x-xpixmap",
+ "xq": "text/xml",
+ "xql": "text/xml",
+ "xquery": "text/xml",
+ "xsd": "text/xml",
+ "xsl": "text/xml",
+ "xslt": "text/xml",
+ "xwd": "application/x-xwd",
+ "yaml": "text/yaml",
+ "yml": "text/yaml",
+ "zip": "application/zip",
+ }
+)
+
+// HttpMethodType defines http method type
+type HttpMethodType string
+
+const (
+ HttpMethodGet HttpMethodType = HTTP_GET
+ HttpMethodPut HttpMethodType = HTTP_PUT
+ HttpMethodPost HttpMethodType = HTTP_POST
+ HttpMethodDelete HttpMethodType = HTTP_DELETE
+ HttpMethodHead HttpMethodType = HTTP_HEAD
+ HttpMethodOptions HttpMethodType = HTTP_OPTIONS
+)
+
+// SubResourceType defines the subResource value
+type SubResourceType string
+
+const (
+ // SubResourceStoragePolicy subResource value: storagePolicy
+ SubResourceStoragePolicy SubResourceType = "storagePolicy"
+
+ // SubResourceStorageClass subResource value: storageClass
+ SubResourceStorageClass SubResourceType = "storageClass"
+
+ // SubResourceQuota subResource value: quota
+ SubResourceQuota SubResourceType = "quota"
+
+ // SubResourceStorageInfo subResource value: storageinfo
+ SubResourceStorageInfo SubResourceType = "storageinfo"
+
+ // SubResourceLocation subResource value: location
+ SubResourceLocation SubResourceType = "location"
+
+ // SubResourceAcl subResource value: acl
+ SubResourceAcl SubResourceType = "acl"
+
+ // SubResourcePolicy subResource value: policy
+ SubResourcePolicy SubResourceType = "policy"
+
+ // SubResourceCors subResource value: cors
+ SubResourceCors SubResourceType = "cors"
+
+ // SubResourceVersioning subResource value: versioning
+ SubResourceVersioning SubResourceType = "versioning"
+
+ // SubResourceWebsite subResource value: website
+ SubResourceWebsite SubResourceType = "website"
+
+ // SubResourceLogging subResource value: logging
+ SubResourceLogging SubResourceType = "logging"
+
+ // SubResourceLifecycle subResource value: lifecycle
+ SubResourceLifecycle SubResourceType = "lifecycle"
+
+ // SubResourceNotification subResource value: notification
+ SubResourceNotification SubResourceType = "notification"
+
+ // SubResourceTagging subResource value: tagging
+ SubResourceTagging SubResourceType = "tagging"
+
+ // SubResourceDelete subResource value: delete
+ SubResourceDelete SubResourceType = "delete"
+
+ // SubResourceVersions subResource value: versions
+ SubResourceVersions SubResourceType = "versions"
+
+ // SubResourceUploads subResource value: uploads
+ SubResourceUploads SubResourceType = "uploads"
+
+ // SubResourceRestore subResource value: restore
+ SubResourceRestore SubResourceType = "restore"
+
+ // SubResourceMetadata subResource value: metadata
+ SubResourceMetadata SubResourceType = "metadata"
+
+ // SubResourceRequestPayment subResource value: requestPayment
+ SubResourceRequestPayment SubResourceType = "requestPayment"
+)
+
+// objectKeyType defines the objectKey value
+type objectKeyType string
+
+const (
+ // objectKeyExtensionPolicy objectKey value: v1/extension_policy
+ objectKeyExtensionPolicy objectKeyType = "v1/extension_policy"
+
+ // objectKeyAsyncFetchJob objectKey value: v1/async-fetch/jobs
+ objectKeyAsyncFetchJob objectKeyType = "v1/async-fetch/jobs"
+)
+
+// AclType defines bucket/object acl type
+type AclType string
+
+const (
+ AclPrivate AclType = "private"
+ AclPublicRead AclType = "public-read"
+ AclPublicReadWrite AclType = "public-read-write"
+ AclAuthenticatedRead AclType = "authenticated-read"
+ AclBucketOwnerRead AclType = "bucket-owner-read"
+ AclBucketOwnerFullControl AclType = "bucket-owner-full-control"
+ AclLogDeliveryWrite AclType = "log-delivery-write"
+ AclPublicReadDelivery AclType = "public-read-delivered"
+ AclPublicReadWriteDelivery AclType = "public-read-write-delivered"
+)
+
+// StorageClassType defines bucket storage class
+type StorageClassType string
+
+const (
+ // StorageClassStandard storage class: STANDARD
+ StorageClassStandard StorageClassType = "STANDARD"
+
+ // StorageClassWarm storage class: WARM
+ StorageClassWarm StorageClassType = "WARM"
+
+ // StorageClassCold storage class: COLD
+ StorageClassCold StorageClassType = "COLD"
+
+ storageClassStandardIA StorageClassType = "STANDARD_IA"
+ storageClassGlacier StorageClassType = "GLACIER"
+)
+
+// PermissionType defines permission type
+type PermissionType string
+
+const (
+ // PermissionRead permission type: READ
+ PermissionRead PermissionType = "READ"
+
+ // PermissionWrite permission type: WRITE
+ PermissionWrite PermissionType = "WRITE"
+
+ // PermissionReadAcp permission type: READ_ACP
+ PermissionReadAcp PermissionType = "READ_ACP"
+
+ // PermissionWriteAcp permission type: WRITE_ACP
+ PermissionWriteAcp PermissionType = "WRITE_ACP"
+
+ // PermissionFullControl permission type: FULL_CONTROL
+ PermissionFullControl PermissionType = "FULL_CONTROL"
+)
+
+// GranteeType defines grantee type
+type GranteeType string
+
+const (
+ // GranteeGroup grantee type: Group
+ GranteeGroup GranteeType = "Group"
+
+ // GranteeUser grantee type: CanonicalUser
+ GranteeUser GranteeType = "CanonicalUser"
+)
+
+// GroupUriType defines grantee uri type
+type GroupUriType string
+
+const (
+ // GroupAllUsers grantee uri type: AllUsers
+ GroupAllUsers GroupUriType = "AllUsers"
+
+ // GroupAuthenticatedUsers grantee uri type: AuthenticatedUsers
+ GroupAuthenticatedUsers GroupUriType = "AuthenticatedUsers"
+
+ // GroupLogDelivery grantee uri type: LogDelivery
+ GroupLogDelivery GroupUriType = "LogDelivery"
+)
+
+// VersioningStatusType defines bucket version status
+type VersioningStatusType string
+
+const (
+ // VersioningStatusEnabled version status: Enabled
+ VersioningStatusEnabled VersioningStatusType = "Enabled"
+
+ // VersioningStatusSuspended version status: Suspended
+ VersioningStatusSuspended VersioningStatusType = "Suspended"
+)
+
+// ProtocolType defines protocol type
+type ProtocolType string
+
+const (
+ // ProtocolHttp protocol type: http
+ ProtocolHttp ProtocolType = "http"
+
+ // ProtocolHttps protocol type: https
+ ProtocolHttps ProtocolType = "https"
+)
+
+// RuleStatusType defines lifeCycle rule status
+type RuleStatusType string
+
+const (
+ // RuleStatusEnabled rule status: Enabled
+ RuleStatusEnabled RuleStatusType = "Enabled"
+
+ // RuleStatusDisabled rule status: Disabled
+ RuleStatusDisabled RuleStatusType = "Disabled"
+)
+
+// RestoreTierType defines restore options
+type RestoreTierType string
+
+const (
+ // RestoreTierExpedited restore options: Expedited
+ RestoreTierExpedited RestoreTierType = "Expedited"
+
+ // RestoreTierStandard restore options: Standard
+ RestoreTierStandard RestoreTierType = "Standard"
+
+ // RestoreTierBulk restore options: Bulk
+ RestoreTierBulk RestoreTierType = "Bulk"
+)
+
+// MetadataDirectiveType defines metadata operation indicator
+type MetadataDirectiveType string
+
+const (
+ // CopyMetadata metadata operation: COPY
+ CopyMetadata MetadataDirectiveType = "COPY"
+
+ // ReplaceNew metadata operation: REPLACE_NEW
+ ReplaceNew MetadataDirectiveType = "REPLACE_NEW"
+
+ // ReplaceMetadata metadata operation: REPLACE
+ ReplaceMetadata MetadataDirectiveType = "REPLACE"
+)
+
+// EventType defines bucket notification type of events
+type EventType string
+
+const (
+ // ObjectCreatedAll type of events: ObjectCreated:*
+ ObjectCreatedAll EventType = "ObjectCreated:*"
+
+ // ObjectCreatedPut type of events: ObjectCreated:Put
+ ObjectCreatedPut EventType = "ObjectCreated:Put"
+
+ // ObjectCreatedPost type of events: ObjectCreated:Post
+ ObjectCreatedPost EventType = "ObjectCreated:Post"
+
+ // ObjectCreatedCopy type of events: ObjectCreated:Copy
+ ObjectCreatedCopy EventType = "ObjectCreated:Copy"
+
+ // ObjectCreatedCompleteMultipartUpload type of events: ObjectCreated:CompleteMultipartUpload
+ ObjectCreatedCompleteMultipartUpload EventType = "ObjectCreated:CompleteMultipartUpload"
+
+ // ObjectRemovedAll type of events: ObjectRemoved:*
+ ObjectRemovedAll EventType = "ObjectRemoved:*"
+
+ // ObjectRemovedDelete type of events: ObjectRemoved:Delete
+ ObjectRemovedDelete EventType = "ObjectRemoved:Delete"
+
+ // ObjectRemovedDeleteMarkerCreated type of events: ObjectRemoved:DeleteMarkerCreated
+ ObjectRemovedDeleteMarkerCreated EventType = "ObjectRemoved:DeleteMarkerCreated"
+)
+
+// PayerType defines type of payer
+type PayerType string
+
+const (
+ // BucketOwnerPayer type of payer: BucketOwner
+ BucketOwnerPayer PayerType = "BucketOwner"
+
+ // RequesterPayer type of payer: Requester
+ RequesterPayer PayerType = "Requester"
+
+ // Requester header value for Requester Pays
+ Requester PayerType = "requester"
+)
+
+// FetchPolicyStatusType defines type of fetch policy status
+type FetchPolicyStatusType string
+
+const (
+ // FetchStatusOpen type of status: open
+ FetchStatusOpen FetchPolicyStatusType = "open"
+
+ // FetchStatusClosed type of status: closed
+ FetchStatusClosed FetchPolicyStatusType = "closed"
+)
diff --git a/modules/obs/convert.go b/modules/obs/convert.go
new file mode 100755
index 000000000..bd859556b
--- /dev/null
+++ b/modules/obs/convert.go
@@ -0,0 +1,880 @@
+// Copyright 2019 Huawei Technologies Co.,Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+//nolint:golint, unused
+package obs
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "reflect"
+ "strings"
+ "time"
+)
+
+func cleanHeaderPrefix(header http.Header) map[string][]string {
+ responseHeaders := make(map[string][]string)
+ for key, value := range header {
+ if len(value) > 0 {
+ key = strings.ToLower(key)
+ if strings.HasPrefix(key, HEADER_PREFIX) || strings.HasPrefix(key, HEADER_PREFIX_OBS) {
+ // HEADER_PREFIX ("x-amz-") and HEADER_PREFIX_OBS ("x-obs-") have the same length,
+ // so a single slice strips either prefix
+ key = key[len(HEADER_PREFIX):]
+ }
+ responseHeaders[key] = value
+ }
+ }
+ return responseHeaders
+}
+
+// ParseStringToEventType converts string value to EventType value and returns it
+func ParseStringToEventType(value string) (ret EventType) {
+ switch value {
+ case "ObjectCreated:*", "s3:ObjectCreated:*":
+ ret = ObjectCreatedAll
+ case "ObjectCreated:Put", "s3:ObjectCreated:Put":
+ ret = ObjectCreatedPut
+ case "ObjectCreated:Post", "s3:ObjectCreated:Post":
+ ret = ObjectCreatedPost
+ case "ObjectCreated:Copy", "s3:ObjectCreated:Copy":
+ ret = ObjectCreatedCopy
+ case "ObjectCreated:CompleteMultipartUpload", "s3:ObjectCreated:CompleteMultipartUpload":
+ ret = ObjectCreatedCompleteMultipartUpload
+ case "ObjectRemoved:*", "s3:ObjectRemoved:*":
+ ret = ObjectRemovedAll
+ case "ObjectRemoved:Delete", "s3:ObjectRemoved:Delete":
+ ret = ObjectRemovedDelete
+ case "ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRemoved:DeleteMarkerCreated":
+ ret = ObjectRemovedDeleteMarkerCreated
+ default:
+ ret = ""
+ }
+ return
+}
+
+// ParseStringToStorageClassType converts string value to StorageClassType value and returns it
+func ParseStringToStorageClassType(value string) (ret StorageClassType) {
+ switch value {
+ case "STANDARD":
+ ret = StorageClassStandard
+ case "STANDARD_IA", "WARM":
+ ret = StorageClassWarm
+ case "GLACIER", "COLD":
+ ret = StorageClassCold
+ default:
+ ret = ""
+ }
+ return
+}
+
+func prepareGrantURI(grant Grant) string {
+ if grant.Grantee.URI == GroupAllUsers || grant.Grantee.URI == GroupAuthenticatedUsers {
+ return fmt.Sprintf("<URI>%s%s</URI>", "http://acs.amazonaws.com/groups/global/", grant.Grantee.URI)
+ }
+ if grant.Grantee.URI == GroupLogDelivery {
+ return fmt.Sprintf("<URI>%s%s</URI>", "http://acs.amazonaws.com/groups/s3/", grant.Grantee.URI)
+ }
+ return fmt.Sprintf("<URI>%s</URI>", grant.Grantee.URI)
+}
+
+func convertGrantToXML(grant Grant, isObs bool, isBucket bool) string {
+ xml := make([]string, 0, 4)
+
+ if grant.Grantee.Type == GranteeUser {
+ if isObs {
+ xml = append(xml, "<Grant><Grantee>")
+ } else {
+ xml = append(xml, fmt.Sprintf("<Grant><Grantee xsi:type=\"%s\">", grant.Grantee.Type))
+ }
+ if grant.Grantee.ID != "" {
+ granteeID := XmlTranscoding(grant.Grantee.ID)
+ xml = append(xml, fmt.Sprintf("<ID>%s</ID>", granteeID))
+ }
+ if !isObs && grant.Grantee.DisplayName != "" {
+ granteeDisplayName := XmlTranscoding(grant.Grantee.DisplayName)
+ xml = append(xml, fmt.Sprintf("<DisplayName>%s</DisplayName>", granteeDisplayName))
+ }
+ xml = append(xml, "</Grantee>")
+ } else {
+ if !isObs {
+ xml = append(xml, fmt.Sprintf("<Grant><Grantee xsi:type=\"%s\">", grant.Grantee.Type))
+ xml = append(xml, prepareGrantURI(grant))
+ xml = append(xml, "</Grantee>")
+ } else if grant.Grantee.URI == GroupAllUsers {
+ xml = append(xml, "<Grant><Grantee>")
+ xml = append(xml, "<Canned>Everyone</Canned>")
+ xml = append(xml, "</Grantee>")
+ } else {
+ return strings.Join(xml, "")
+ }
+ }
+
+ xml = append(xml, fmt.Sprintf("<Permission>%s</Permission>", grant.Permission))
+ if isObs && isBucket {
+ xml = append(xml, fmt.Sprintf("<Delivered>%t</Delivered>", grant.Delivered))
+ }
+ xml = append(xml, "</Grant>")
+ return strings.Join(xml, "")
+}
+
+func hasLoggingTarget(input BucketLoggingStatus) bool {
+ return input.TargetBucket != "" || input.TargetPrefix != "" || len(input.TargetGrants) > 0
+}
+
+// ConvertLoggingStatusToXml converts BucketLoggingStatus value to XML data and returns it
+func ConvertLoggingStatusToXml(input BucketLoggingStatus, returnMd5 bool, isObs bool) (data string, md5 string) {
+ grantsLength := len(input.TargetGrants)
+ xml := make([]string, 0, 8+grantsLength)
+
+ xml = append(xml, "")
+ if isObs && input.Agency != "" {
+ agency := XmlTranscoding(input.Agency)
+ xml = append(xml, fmt.Sprintf("%s", agency))
+ }
+ if hasLoggingTarget(input) {
+ xml = append(xml, "")
+ if input.TargetBucket != "" {
+ xml = append(xml, fmt.Sprintf("%s", input.TargetBucket))
+ }
+ if input.TargetPrefix != "" {
+ targetPrefix := XmlTranscoding(input.TargetPrefix)
+ xml = append(xml, fmt.Sprintf("%s", targetPrefix))
+ }
+ if grantsLength > 0 {
+ xml = append(xml, "")
+ for _, grant := range input.TargetGrants {
+ xml = append(xml, convertGrantToXML(grant, isObs, false))
+ }
+ xml = append(xml, "")
+ }
+
+ xml = append(xml, "")
+ }
+ xml = append(xml, "")
+ data = strings.Join(xml, "")
+ if returnMd5 {
+ md5 = Base64Md5([]byte(data))
+ }
+ return
+}
+
+// ConvertAclToXml converts AccessControlPolicy value to XML data and returns it
+func ConvertAclToXml(input AccessControlPolicy, returnMd5 bool, isObs bool) (data string, md5 string) {
+ xml := make([]string, 0, 4+len(input.Grants))
+ ownerID := XmlTranscoding(input.Owner.ID)
+ xml = append(xml, fmt.Sprintf("%s", ownerID))
+ if !isObs && input.Owner.DisplayName != "" {
+ ownerDisplayName := XmlTranscoding(input.Owner.DisplayName)
+ xml = append(xml, fmt.Sprintf("%s", ownerDisplayName))
+ }
+ if isObs && input.Delivered != "" {
+ objectDelivered := XmlTranscoding(input.Delivered)
+ xml = append(xml, fmt.Sprintf("%s", objectDelivered))
+ } else {
+ xml = append(xml, "")
+ }
+ for _, grant := range input.Grants {
+ xml = append(xml, convertGrantToXML(grant, isObs, false))
+ }
+ xml = append(xml, "")
+ data = strings.Join(xml, "")
+ if returnMd5 {
+ md5 = Base64Md5([]byte(data))
+ }
+ return
+}
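+
+// A minimal usage sketch (illustrative): build the ACL document and the MD5
+// digest used for a Content-MD5 request header in one call:
+//
+//	policy := AccessControlPolicy{Owner: Owner{ID: "ownerID"}}
+//	data, contentMd5 := ConvertAclToXml(policy, true, false)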
+
+func convertBucketACLToXML(input AccessControlPolicy, returnMd5 bool, isObs bool) (data string, md5 string) {
+ xml := make([]string, 0, 4+len(input.Grants))
+ ownerID := XmlTranscoding(input.Owner.ID)
+ xml = append(xml, fmt.Sprintf("%s", ownerID))
+ if !isObs && input.Owner.DisplayName != "" {
+ ownerDisplayName := XmlTranscoding(input.Owner.DisplayName)
+ xml = append(xml, fmt.Sprintf("%s", ownerDisplayName))
+ }
+
+ xml = append(xml, "")
+
+ for _, grant := range input.Grants {
+ xml = append(xml, convertGrantToXML(grant, isObs, true))
+ }
+ xml = append(xml, "")
+ data = strings.Join(xml, "")
+ if returnMd5 {
+ md5 = Base64Md5([]byte(data))
+ }
+ return
+}
+
+func convertConditionToXML(condition Condition) string {
+ xml := make([]string, 0, 2)
+ if condition.KeyPrefixEquals != "" {
+ keyPrefixEquals := XmlTranscoding(condition.KeyPrefixEquals)
+ xml = append(xml, fmt.Sprintf("%s", keyPrefixEquals))
+ }
+ if condition.HttpErrorCodeReturnedEquals != "" {
+ xml = append(xml, fmt.Sprintf("%s", condition.HttpErrorCodeReturnedEquals))
+ }
+ if len(xml) > 0 {
+ return fmt.Sprintf("%s", strings.Join(xml, ""))
+ }
+ return ""
+}
+
+func prepareRoutingRule(input BucketWebsiteConfiguration) string {
+ xml := make([]string, 0, len(input.RoutingRules)*10)
+ for _, routingRule := range input.RoutingRules {
+ xml = append(xml, "")
+ xml = append(xml, "")
+ if routingRule.Redirect.Protocol != "" {
+ xml = append(xml, fmt.Sprintf("%s", routingRule.Redirect.Protocol))
+ }
+ if routingRule.Redirect.HostName != "" {
+ xml = append(xml, fmt.Sprintf("%s", routingRule.Redirect.HostName))
+ }
+ if routingRule.Redirect.ReplaceKeyPrefixWith != "" {
+ replaceKeyPrefixWith := XmlTranscoding(routingRule.Redirect.ReplaceKeyPrefixWith)
+ xml = append(xml, fmt.Sprintf("%s", replaceKeyPrefixWith))
+ }
+
+ if routingRule.Redirect.ReplaceKeyWith != "" {
+ replaceKeyWith := XmlTranscoding(routingRule.Redirect.ReplaceKeyWith)
+ xml = append(xml, fmt.Sprintf("%s", replaceKeyWith))
+ }
+ if routingRule.Redirect.HttpRedirectCode != "" {
+ xml = append(xml, fmt.Sprintf("%s", routingRule.Redirect.HttpRedirectCode))
+ }
+ xml = append(xml, "")
+
+ if ret := convertConditionToXML(routingRule.Condition); ret != "" {
+ xml = append(xml, ret)
+ }
+ xml = append(xml, "")
+ }
+ return strings.Join(xml, "")
+}
+
+// ConvertWebsiteConfigurationToXml converts BucketWebsiteConfiguration value to XML data and returns it
+func ConvertWebsiteConfigurationToXml(input BucketWebsiteConfiguration, returnMd5 bool) (data string, md5 string) {
+ routingRuleLength := len(input.RoutingRules)
+ xml := make([]string, 0, 6+routingRuleLength*10)
+ xml = append(xml, "")
+
+ if input.RedirectAllRequestsTo.HostName != "" {
+ xml = append(xml, fmt.Sprintf("%s", input.RedirectAllRequestsTo.HostName))
+ if input.RedirectAllRequestsTo.Protocol != "" {
+ xml = append(xml, fmt.Sprintf("%s", input.RedirectAllRequestsTo.Protocol))
+ }
+ xml = append(xml, "")
+ } else {
+ if input.IndexDocument.Suffix != "" {
+ indexDocumentSuffix := XmlTranscoding(input.IndexDocument.Suffix)
+ xml = append(xml, fmt.Sprintf("%s", indexDocumentSuffix))
+ }
+ if input.ErrorDocument.Key != "" {
+ errorDocumentKey := XmlTranscoding(input.ErrorDocument.Key)
+ xml = append(xml, fmt.Sprintf("%s", errorDocumentKey))
+ }
+ if routingRuleLength > 0 {
+ xml = append(xml, "")
+ xml = append(xml, prepareRoutingRule(input))
+ xml = append(xml, "")
+ }
+ }
+
+ xml = append(xml, "")
+ data = strings.Join(xml, "")
+ if returnMd5 {
+ md5 = Base64Md5([]byte(data))
+ }
+ return
+}
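+
+// A minimal usage sketch (illustrative): serialize a static-website
+// configuration that serves index.html and a custom error page:
+//
+//	cfg := BucketWebsiteConfiguration{}
+//	cfg.IndexDocument.Suffix = "index.html"
+//	cfg.ErrorDocument.Key = "error.html"
+//	data, _ := ConvertWebsiteConfigurationToXml(cfg, false)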
+
+func convertTransitionsToXML(transitions []Transition, isObs bool) string {
+ if length := len(transitions); length > 0 {
+ xml := make([]string, 0, length)
+ for _, transition := range transitions {
+ var temp string
+ if transition.Days > 0 {
+ temp = fmt.Sprintf("%d", transition.Days)
+ } else if !transition.Date.IsZero() {
+ temp = fmt.Sprintf("%s", transition.Date.UTC().Format(ISO8601_MIDNIGHT_DATE_FORMAT))
+ }
+ if temp != "" {
+ if !isObs {
+ storageClass := string(transition.StorageClass)
+ if transition.StorageClass == StorageClassWarm {
+ storageClass = string(storageClassStandardIA)
+ } else if transition.StorageClass == StorageClassCold {
+ storageClass = string(storageClassGlacier)
+ }
+ xml = append(xml, fmt.Sprintf("%s%s", temp, storageClass))
+ } else {
+ xml = append(xml, fmt.Sprintf("%s%s", temp, transition.StorageClass))
+ }
+ }
+ }
+ return strings.Join(xml, "")
+ }
+ return ""
+}
+
+func convertExpirationToXML(expiration Expiration) string {
+ if expiration.Days > 0 {
+ return fmt.Sprintf("%d", expiration.Days)
+ } else if !expiration.Date.IsZero() {
+ return fmt.Sprintf("%s", expiration.Date.UTC().Format(ISO8601_MIDNIGHT_DATE_FORMAT))
+ }
+ return ""
+}
+
+func convertNoncurrentVersionTransitionsToXML(noncurrentVersionTransitions []NoncurrentVersionTransition, isObs bool) string {
+ if length := len(noncurrentVersionTransitions); length > 0 {
+ xml := make([]string, 0, length)
+ for _, noncurrentVersionTransition := range noncurrentVersionTransitions {
+ if noncurrentVersionTransition.NoncurrentDays > 0 {
+ storageClass := string(noncurrentVersionTransition.StorageClass)
+ if !isObs {
+ if storageClass == string(StorageClassWarm) {
+ storageClass = string(storageClassStandardIA)
+ } else if storageClass == string(StorageClassCold) {
+ storageClass = string(storageClassGlacier)
+ }
+ }
+ xml = append(xml, fmt.Sprintf("%d"+
+ "%s",
+ noncurrentVersionTransition.NoncurrentDays, storageClass))
+ }
+ }
+ return strings.Join(xml, "")
+ }
+ return ""
+}
+
+func convertNoncurrentVersionExpirationToXML(noncurrentVersionExpiration NoncurrentVersionExpiration) string {
+ if noncurrentVersionExpiration.NoncurrentDays > 0 {
+ return fmt.Sprintf("<NoncurrentVersionExpiration><NoncurrentDays>%d</NoncurrentDays></NoncurrentVersionExpiration>", noncurrentVersionExpiration.NoncurrentDays)
+ }
+ return ""
+}
+
+// ConvertLifecyleConfigurationToXml converts BucketLifecyleConfiguration value to XML data and returns it
+func ConvertLifecyleConfigurationToXml(input BucketLifecyleConfiguration, returnMd5 bool, isObs bool) (data string, md5 string) {
+ xml := make([]string, 0, 2+len(input.LifecycleRules)*9)
+ xml = append(xml, "")
+ for _, lifecyleRule := range input.LifecycleRules {
+ xml = append(xml, "")
+ if lifecyleRule.ID != "" {
+ lifecyleRuleID := XmlTranscoding(lifecyleRule.ID)
+ xml = append(xml, fmt.Sprintf("%s", lifecyleRuleID))
+ }
+ lifecyleRulePrefix := XmlTranscoding(lifecyleRule.Prefix)
+ xml = append(xml, fmt.Sprintf("%s", lifecyleRulePrefix))
+ xml = append(xml, fmt.Sprintf("%s", lifecyleRule.Status))
+ if ret := convertTransitionsToXML(lifecyleRule.Transitions, isObs); ret != "" {
+ xml = append(xml, ret)
+ }
+ if ret := convertExpirationToXML(lifecyleRule.Expiration); ret != "" {
+ xml = append(xml, ret)
+ }
+ if ret := convertNoncurrentVersionTransitionsToXML(lifecyleRule.NoncurrentVersionTransitions, isObs); ret != "" {
+ xml = append(xml, ret)
+ }
+ if ret := convertNoncurrentVersionExpirationToXML(lifecyleRule.NoncurrentVersionExpiration); ret != "" {
+ xml = append(xml, ret)
+ }
+ xml = append(xml, "")
+ }
+ xml = append(xml, "")
+ data = strings.Join(xml, "")
+ if returnMd5 {
+ md5 = Base64Md5([]byte(data))
+ }
+ return
+}
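+
+// A minimal usage sketch (illustrative; assumes the rule type is named
+// LifecycleRule): expire everything under logs/ after 30 days:
+//
+//	cfg := BucketLifecyleConfiguration{LifecycleRules: []LifecycleRule{
+//	    {Prefix: "logs/", Status: "Enabled", Expiration: Expiration{Days: 30}},
+//	}}
+//	data, contentMd5 := ConvertLifecyleConfigurationToXml(cfg, true, false)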
+
+func converntFilterRulesToXML(filterRules []FilterRule, isObs bool) string {
+ if length := len(filterRules); length > 0 {
+ xml := make([]string, 0, length*4)
+ for _, filterRule := range filterRules {
+ xml = append(xml, "")
+ if filterRule.Name != "" {
+ filterRuleName := XmlTranscoding(filterRule.Name)
+ xml = append(xml, fmt.Sprintf("%s", filterRuleName))
+ }
+ if filterRule.Value != "" {
+ filterRuleValue := XmlTranscoding(filterRule.Value)
+ xml = append(xml, fmt.Sprintf("%s", filterRuleValue))
+ }
+ xml = append(xml, "")
+ }
+ if !isObs {
+ return fmt.Sprintf("%s", strings.Join(xml, ""))
+ }
+ return fmt.Sprintf("", strings.Join(xml, ""))
+ }
+ return ""
+}
+
+func converntEventsToXML(events []EventType, isObs bool) string {
+ if length := len(events); length > 0 {
+ xml := make([]string, 0, length)
+ if !isObs {
+ for _, event := range events {
+ xml = append(xml, fmt.Sprintf("%s%s", "s3:", event))
+ }
+ } else {
+ for _, event := range events {
+ xml = append(xml, fmt.Sprintf("%s", event))
+ }
+ }
+ return strings.Join(xml, "")
+ }
+ return ""
+}
+
+func converntConfigureToXML(topicConfiguration TopicConfiguration, xmlElem string, isObs bool) string {
+ xml := make([]string, 0, 6)
+ xml = append(xml, xmlElem)
+ if topicConfiguration.ID != "" {
+ topicConfigurationID := XmlTranscoding(topicConfiguration.ID)
+ xml = append(xml, fmt.Sprintf("%s", topicConfigurationID))
+ }
+ topicConfigurationTopic := XmlTranscoding(topicConfiguration.Topic)
+ xml = append(xml, fmt.Sprintf("%s", topicConfigurationTopic))
+
+ if ret := converntEventsToXML(topicConfiguration.Events, isObs); ret != "" {
+ xml = append(xml, ret)
+ }
+ if ret := converntFilterRulesToXML(topicConfiguration.FilterRules, isObs); ret != "" {
+ xml = append(xml, ret)
+ }
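+ // derive the matching closing tag from the opening tag passed via xmlElem,
+ // e.g. "<TopicConfiguration>" becomes "</TopicConfiguration>"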
+ tempElem := xmlElem[0:1] + "/" + xmlElem[1:]
+ xml = append(xml, tempElem)
+ return strings.Join(xml, "")
+}
+
+// ConverntObsRestoreToXml converts RestoreObjectInput value to XML data and returns it
+func ConverntObsRestoreToXml(restoreObjectInput RestoreObjectInput) string {
+ xml := make([]string, 0, 2)
+ xml = append(xml, fmt.Sprintf("%d", restoreObjectInput.Days))
+ if restoreObjectInput.Tier != "Bulk" {
+ xml = append(xml, fmt.Sprintf("%s", restoreObjectInput.Tier))
+ }
+ xml = append(xml, fmt.Sprintf(""))
+ data := strings.Join(xml, "")
+ return data
+}
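+
+// For illustration only: RestoreObjectInput{Days: 1, Tier: "Expedited"} yields
+// <RestoreRequest><Days>1</Days><RestoreJob><Tier>Expedited</Tier></RestoreJob></RestoreRequest>.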
+
+// ConvertNotificationToXml converts BucketNotification value to XML data and returns it
+func ConvertNotificationToXml(input BucketNotification, returnMd5 bool, isObs bool) (data string, md5 string) {
+ xml := make([]string, 0, 2+len(input.TopicConfigurations)*6)
+ xml = append(xml, "")
+ for _, topicConfiguration := range input.TopicConfigurations {
+ ret := converntConfigureToXML(topicConfiguration, "", isObs)
+ xml = append(xml, ret)
+ }
+ xml = append(xml, "")
+ data = strings.Join(xml, "")
+ if returnMd5 {
+ md5 = Base64Md5([]byte(data))
+ }
+ return
+}
+
+// ConvertCompleteMultipartUploadInputToXml converts CompleteMultipartUploadInput value to XML data and returns it
+func ConvertCompleteMultipartUploadInputToXml(input CompleteMultipartUploadInput, returnMd5 bool) (data string, md5 string) {
+ xml := make([]string, 0, 2+len(input.Parts)*4)
+ xml = append(xml, "")
+ for _, part := range input.Parts {
+ xml = append(xml, "")
+ xml = append(xml, fmt.Sprintf("%d", part.PartNumber))
+ xml = append(xml, fmt.Sprintf("%s", part.ETag))
+ xml = append(xml, "")
+ }
+ xml = append(xml, "")
+ data = strings.Join(xml, "")
+ if returnMd5 {
+ md5 = Base64Md5([]byte(data))
+ }
+ return
+}
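+
+// A minimal usage sketch (illustrative; assumes the part type is named Part):
+// the body sent when completing a two-part multipart upload:
+//
+//	input := CompleteMultipartUploadInput{Parts: []Part{
+//	    {PartNumber: 1, ETag: "etag1"},
+//	    {PartNumber: 2, ETag: "etag2"},
+//	}}
+//	data, contentMd5 := ConvertCompleteMultipartUploadInputToXml(input, true)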
+
+func parseSseHeader(responseHeaders map[string][]string) (sseHeader ISseHeader) {
+ if ret, ok := responseHeaders[HEADER_SSEC_ENCRYPTION]; ok {
+ sseCHeader := SseCHeader{Encryption: ret[0]}
+ if ret, ok = responseHeaders[HEADER_SSEC_KEY_MD5]; ok {
+ sseCHeader.KeyMD5 = ret[0]
+ }
+ sseHeader = sseCHeader
+ } else if ret, ok := responseHeaders[HEADER_SSEKMS_ENCRYPTION]; ok {
+ sseKmsHeader := SseKmsHeader{Encryption: ret[0]}
+ if ret, ok = responseHeaders[HEADER_SSEKMS_KEY]; ok {
+ sseKmsHeader.Key = ret[0]
+ } else if ret, ok = responseHeaders[HEADER_SSEKMS_ENCRYPT_KEY_OBS]; ok {
+ sseKmsHeader.Key = ret[0]
+ }
+ sseHeader = sseKmsHeader
+ }
+ return
+}
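+
+// Callers can recover the concrete header type with a type switch
+// (illustrative sketch):
+//
+//	switch h := sseHeader.(type) {
+//	case SseCHeader:
+//	    _ = h.KeyMD5 // customer-provided key (SSE-C)
+//	case SseKmsHeader:
+//	    _ = h.Key // KMS-managed key (SSE-KMS)
+//	}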
+
+func parseCorsHeader(output BaseModel) (AllowOrigin, AllowHeader, AllowMethod, ExposeHeader string, MaxAgeSeconds int) {
+ if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_ALLOW_ORIGIN]; ok {
+ AllowOrigin = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_ALLOW_HEADERS]; ok {
+ AllowHeader = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_MAX_AGE]; ok {
+ MaxAgeSeconds = StringToInt(ret[0], 0)
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_ALLOW_METHODS]; ok {
+ AllowMethod = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_EXPOSE_HEADERS]; ok {
+ ExposeHeader = ret[0]
+ }
+ return
+}
+
+func parseUnCommonHeader(output *GetObjectMetadataOutput) {
+ if ret, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok {
+ output.VersionId = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_WEBSITE_REDIRECT_LOCATION]; ok {
+ output.WebsiteRedirectLocation = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_EXPIRATION]; ok {
+ output.Expiration = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_RESTORE]; ok {
+ output.Restore = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_OBJECT_TYPE]; ok {
+ output.ObjectType = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_NEXT_APPEND_POSITION]; ok {
+ output.NextAppendPosition = ret[0]
+ }
+}
+
+// ParseGetObjectMetadataOutput sets GetObjectMetadataOutput field values with response headers
+func ParseGetObjectMetadataOutput(output *GetObjectMetadataOutput) {
+ output.AllowOrigin, output.AllowHeader, output.AllowMethod, output.ExposeHeader, output.MaxAgeSeconds = parseCorsHeader(output.BaseModel)
+ parseUnCommonHeader(output)
+ if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS2]; ok {
+ output.StorageClass = ParseStringToStorageClassType(ret[0])
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_ETAG]; ok {
+ output.ETag = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_CONTENT_TYPE]; ok {
+ output.ContentType = ret[0]
+ }
+
+ output.SseHeader = parseSseHeader(output.ResponseHeaders)
+ if ret, ok := output.ResponseHeaders[HEADER_LASTMODIFIED]; ok {
+ ret, err := time.Parse(time.RFC1123, ret[0])
+ if err == nil {
+ output.LastModified = ret
+ }
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_CONTENT_LENGTH]; ok {
+ output.ContentLength = StringToInt64(ret[0], 0)
+ }
+
+ output.Metadata = make(map[string]string)
+
+ for key, value := range output.ResponseHeaders {
+ if strings.HasPrefix(key, PREFIX_META) {
+ _key := key[len(PREFIX_META):]
+ output.ResponseHeaders[_key] = value
+ output.Metadata[_key] = value[0]
+ delete(output.ResponseHeaders, key)
+ }
+ }
+}
+
+// ParseCopyObjectOutput sets CopyObjectOutput field values with response headers
+func ParseCopyObjectOutput(output *CopyObjectOutput) {
+ if ret, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok {
+ output.VersionId = ret[0]
+ }
+ output.SseHeader = parseSseHeader(output.ResponseHeaders)
+ if ret, ok := output.ResponseHeaders[HEADER_COPY_SOURCE_VERSION_ID]; ok {
+ output.CopySourceVersionId = ret[0]
+ }
+}
+
+// ParsePutObjectOutput sets PutObjectOutput field values with response headers
+func ParsePutObjectOutput(output *PutObjectOutput) {
+ if ret, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok {
+ output.VersionId = ret[0]
+ }
+ output.SseHeader = parseSseHeader(output.ResponseHeaders)
+ if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS2]; ok {
+ output.StorageClass = ParseStringToStorageClassType(ret[0])
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_ETAG]; ok {
+ output.ETag = ret[0]
+ }
+}
+
+// ParseInitiateMultipartUploadOutput sets InitiateMultipartUploadOutput field values with response headers
+func ParseInitiateMultipartUploadOutput(output *InitiateMultipartUploadOutput) {
+ output.SseHeader = parseSseHeader(output.ResponseHeaders)
+}
+
+// ParseUploadPartOutput sets UploadPartOutput field values with response headers
+func ParseUploadPartOutput(output *UploadPartOutput) {
+ output.SseHeader = parseSseHeader(output.ResponseHeaders)
+ if ret, ok := output.ResponseHeaders[HEADER_ETAG]; ok {
+ output.ETag = ret[0]
+ }
+}
+
+// ParseCompleteMultipartUploadOutput sets CompleteMultipartUploadOutput field values with response headers
+func ParseCompleteMultipartUploadOutput(output *CompleteMultipartUploadOutput) {
+ output.SseHeader = parseSseHeader(output.ResponseHeaders)
+ if ret, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok {
+ output.VersionId = ret[0]
+ }
+}
+
+// ParseCopyPartOutput sets CopyPartOutput field values with response headers
+func ParseCopyPartOutput(output *CopyPartOutput) {
+ output.SseHeader = parseSseHeader(output.ResponseHeaders)
+}
+
+// ParseGetBucketMetadataOutput sets GetBucketMetadataOutput field values with response headers
+func ParseGetBucketMetadataOutput(output *GetBucketMetadataOutput) {
+ output.AllowOrigin, output.AllowHeader, output.AllowMethod, output.ExposeHeader, output.MaxAgeSeconds = parseCorsHeader(output.BaseModel)
+ if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS]; ok {
+ output.StorageClass = ParseStringToStorageClassType(ret[0])
+ } else if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS2]; ok {
+ output.StorageClass = ParseStringToStorageClassType(ret[0])
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_VERSION_OBS]; ok {
+ output.Version = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok {
+ output.Location = ret[0]
+ } else if ret, ok := output.ResponseHeaders[HEADER_BUCKET_LOCATION_OBS]; ok {
+ output.Location = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_EPID_HEADERS]; ok {
+ output.Epid = ret[0]
+ }
+}
+
+func parseContentHeader(output *SetObjectMetadataOutput) {
+ if ret, ok := output.ResponseHeaders[HEADER_CONTENT_DISPOSITION]; ok {
+ output.ContentDisposition = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_CONTENT_ENCODING]; ok {
+ output.ContentEncoding = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_CONTENT_LANGUAGE]; ok {
+ output.ContentLanguage = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_CONTENT_TYPE]; ok {
+ output.ContentType = ret[0]
+ }
+}
+
+// ParseSetObjectMetadataOutput sets SetObjectMetadataOutput field values with response headers
+func ParseSetObjectMetadataOutput(output *SetObjectMetadataOutput) {
+ if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS]; ok {
+ output.StorageClass = ParseStringToStorageClassType(ret[0])
+ } else if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS2]; ok {
+ output.StorageClass = ParseStringToStorageClassType(ret[0])
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_METADATA_DIRECTIVE]; ok {
+ output.MetadataDirective = MetadataDirectiveType(ret[0])
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_CACHE_CONTROL]; ok {
+ output.CacheControl = ret[0]
+ }
+ parseContentHeader(output)
+ if ret, ok := output.ResponseHeaders[HEADER_EXPIRES]; ok {
+ output.Expires = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_WEBSITE_REDIRECT_LOCATION]; ok {
+ output.WebsiteRedirectLocation = ret[0]
+ }
+ output.Metadata = make(map[string]string)
+
+ for key, value := range output.ResponseHeaders {
+ if strings.HasPrefix(key, PREFIX_META) {
+ _key := key[len(PREFIX_META):]
+ output.ResponseHeaders[_key] = value
+ output.Metadata[_key] = value[0]
+ delete(output.ResponseHeaders, key)
+ }
+ }
+}
+
+// ParseDeleteObjectOutput sets DeleteObjectOutput field values with response headers
+func ParseDeleteObjectOutput(output *DeleteObjectOutput) {
+ if versionID, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok {
+ output.VersionId = versionID[0]
+ }
+
+ if deleteMarker, ok := output.ResponseHeaders[HEADER_DELETE_MARKER]; ok {
+ output.DeleteMarker = deleteMarker[0] == "true"
+ }
+}
+
+// ParseGetObjectOutput sets GetObjectOutput field values with response headers
+func ParseGetObjectOutput(output *GetObjectOutput) {
+ ParseGetObjectMetadataOutput(&output.GetObjectMetadataOutput)
+ if ret, ok := output.ResponseHeaders[HEADER_DELETE_MARKER]; ok {
+ output.DeleteMarker = ret[0] == "true"
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_CACHE_CONTROL]; ok {
+ output.CacheControl = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_CONTENT_DISPOSITION]; ok {
+ output.ContentDisposition = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_CONTENT_ENCODING]; ok {
+ output.ContentEncoding = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_CONTENT_LANGUAGE]; ok {
+ output.ContentLanguage = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_EXPIRES]; ok {
+ output.Expires = ret[0]
+ }
+}
+
+// ConvertRequestToIoReaderV2 converts req to XML data
+func ConvertRequestToIoReaderV2(req interface{}) (io.Reader, string, error) {
+ data, err := TransToXml(req)
+ if err == nil {
+ if isDebugLogEnabled() {
+ doLog(LEVEL_DEBUG, "Do http request with data: %s", string(data))
+ }
+ return bytes.NewReader(data), Base64Md5(data), nil
+ }
+ return nil, "", err
+}
+
+// ConvertRequestToIoReader converts req to XML data
+func ConvertRequestToIoReader(req interface{}) (io.Reader, error) {
+ body, err := TransToXml(req)
+ if err == nil {
+ if isDebugLogEnabled() {
+ doLog(LEVEL_DEBUG, "Do http request with data: %s", string(body))
+ }
+ return bytes.NewReader(body), nil
+ }
+ return nil, err
+}
+
+// ParseResponseToBaseModel gets response from OBS
+func ParseResponseToBaseModel(resp *http.Response, baseModel IBaseModel, xmlResult bool, isObs bool) (err error) {
+ readCloser, ok := baseModel.(IReadCloser)
+ if !ok {
+ defer func() {
+ errMsg := resp.Body.Close()
+ if errMsg != nil {
+ doLog(LEVEL_WARN, "Failed to close response body")
+ }
+ }()
+ var body []byte
+ body, err = ioutil.ReadAll(resp.Body)
+ if err == nil && len(body) > 0 {
+ if xmlResult {
+ err = ParseXml(body, baseModel)
+ } else {
+ s := reflect.TypeOf(baseModel).Elem()
+ if s.Name() == "GetBucketPolicyOutput" {
+ for i := 0; i < s.NumField(); i++ {
+ if s.Field(i).Tag == "json:\"body\"" {
+ reflect.ValueOf(baseModel).Elem().FieldByName(s.Field(i).Name).SetString(string(body))
+ break
+ }
+ }
+ } else {
+ err = parseJSON(body, baseModel)
+ }
+ }
+ if err != nil {
+ doLog(LEVEL_ERROR, "Unmarshal error: %v", err)
+ }
+ }
+ } else {
+ readCloser.setReadCloser(resp.Body)
+ }
+
+ baseModel.setStatusCode(resp.StatusCode)
+ responseHeaders := cleanHeaderPrefix(resp.Header)
+ baseModel.setResponseHeaders(responseHeaders)
+ if values, ok := responseHeaders[HEADER_REQUEST_ID]; ok {
+ baseModel.setRequestID(values[0])
+ }
+ return
+}
+
+// ParseResponseToObsError gets obsError from OBS
+func ParseResponseToObsError(resp *http.Response, isObs bool) error {
+ isJson := false
+ if contentType, ok := resp.Header[HEADER_CONTENT_TYPE_CAML]; ok {
+ jsonType, _ := mimeTypes["json"]
+ isJson = contentType[0] == jsonType
+ }
+ obsError := ObsError{}
+ respError := ParseResponseToBaseModel(resp, &obsError, !isJson, isObs)
+ if respError != nil {
+ doLog(LEVEL_WARN, "Parse response to BaseModel with error: %v", respError)
+ }
+ obsError.Status = resp.Status
+ return obsError
+}
+
+// convertFetchPolicyToJSON converts SetBucketFetchPolicyInput into json format
+func convertFetchPolicyToJSON(input SetBucketFetchPolicyInput) (data string, err error) {
+ fetch := map[string]SetBucketFetchPolicyInput{"fetch": input}
+ ret, err := json.Marshal(fetch)
+ if err != nil {
+ return "", err
+ }
+ data = string(ret)
+ return
+}
+
+// convertFetchJobToJSON converts SetBucketFetchJobInput into json format
+func convertFetchJobToJSON(input SetBucketFetchJobInput) (data string, err error) {
+ objectHeaders := make(map[string]string)
+ for key, value := range input.ObjectHeaders {
+ if value != "" {
+ _key := strings.ToLower(key)
+ if !strings.HasPrefix(_key, HEADER_PREFIX_OBS) {
+ _key = HEADER_PREFIX_META_OBS + _key
+ }
+ objectHeaders[_key] = value
+ }
+ }
+ input.ObjectHeaders = objectHeaders
+ ret, err := json.Marshal(input)
+ if err != nil {
+ return "", err
+ }
+ data = string(ret)
+ return
+}
diff --git a/modules/obs/error.go b/modules/obs/error.go
new file mode 100755
index 000000000..63cb5bb03
--- /dev/null
+++ b/modules/obs/error.go
@@ -0,0 +1,35 @@
+// Copyright 2019 Huawei Technologies Co.,Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+//nolint:golint, unused
+package obs
+
+import (
+ "encoding/xml"
+ "fmt"
+)
+
+// ObsError defines error response from OBS
+type ObsError struct {
+ BaseModel
+ Status string
+ XMLName xml.Name `xml:"Error"`
+ Code string `xml:"Code" json:"code"`
+ Message string `xml:"Message" json:"message"`
+ Resource string `xml:"Resource"`
+ HostId string `xml:"HostId"`
+}
+
+func (err ObsError) Error() string {
+ return fmt.Sprintf("obs: service returned error: Status=%s, Code=%s, Message=%s, RequestId=%s",
+ err.Status, err.Code, err.Message, err.RequestId)
+}
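+
+// A typical caller-side check (illustrative sketch, not upstream code):
+//
+//	if obsError, ok := err.(ObsError); ok {
+//	    fmt.Printf("status=%d code=%s message=%s\n", obsError.StatusCode, obsError.Code, obsError.Message)
+//	}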
diff --git a/modules/obs/extension.go b/modules/obs/extension.go
new file mode 100755
index 000000000..bbf33c56b
--- /dev/null
+++ b/modules/obs/extension.go
@@ -0,0 +1,37 @@
+// Copyright 2019 Huawei Technologies Co.,Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+//nolint:golint, unused
+package obs
+
+import (
+ "fmt"
+ "strings"
+)
+
+type extensionOptions interface{}
+type extensionHeaders func(headers map[string][]string, isObs bool) error
+
+func setHeaderPrefix(key string, value string) extensionHeaders {
+ return func(headers map[string][]string, isObs bool) error {
+ if strings.TrimSpace(value) == "" {
+ return fmt.Errorf("set header %s with empty value", key)
+ }
+ setHeaders(headers, key, []string{value}, isObs)
+ return nil
+ }
+}
+
+// WithReqPaymentHeader sets header for requester-pays
+func WithReqPaymentHeader(requester PayerType) extensionHeaders {
+ return setHeaderPrefix(REQUEST_PAYER, string(requester))
+}
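+
+// A minimal usage sketch (illustrative): pass the option to any client call
+// that accepts extensions, so the request carries the requester-pays header:
+//
+//	output, err := obsClient.GetObject(input, WithReqPaymentHeader(PayerType("requester")))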
diff --git a/modules/obs/http.go b/modules/obs/http.go
new file mode 100755
index 000000000..e305c14b5
--- /dev/null
+++ b/modules/obs/http.go
@@ -0,0 +1,566 @@
+// Copyright 2019 Huawei Technologies Co.,Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+package obs
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "math/rand"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+ "time"
+)
+
+func prepareHeaders(headers map[string][]string, meta bool, isObs bool) map[string][]string {
+ _headers := make(map[string][]string, len(headers))
+ if headers != nil {
+ for key, value := range headers {
+ key = strings.TrimSpace(key)
+ if key == "" {
+ continue
+ }
+ _key := strings.ToLower(key)
+ if _, ok := allowedRequestHTTPHeaderMetadataNames[_key]; !ok && !strings.HasPrefix(key, HEADER_PREFIX) && !strings.HasPrefix(key, HEADER_PREFIX_OBS) {
+ if !meta {
+ continue
+ }
+ if !isObs {
+ _key = HEADER_PREFIX_META + _key
+ } else {
+ _key = HEADER_PREFIX_META_OBS + _key
+ }
+ } else {
+ _key = key
+ }
+ _headers[_key] = value
+ }
+ }
+ return _headers
+}
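+
+// For illustration only: with meta=true and isObs=false, a caller header such
+// as {"color": ["red"]} is forwarded as {"x-amz-meta-color": ["red"]}
+// (assuming the usual "x-amz-meta-" value of HEADER_PREFIX_META); with
+// meta=false, headers outside the allowed set are dropped.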
+
+func (obsClient ObsClient) doActionWithoutBucket(action, method string, input ISerializable, output IBaseModel, extensions []extensionOptions) error {
+ return obsClient.doAction(action, method, "", "", input, output, true, true, extensions)
+}
+
+func (obsClient ObsClient) doActionWithBucketV2(action, method, bucketName string, input ISerializable, output IBaseModel, extensions []extensionOptions) error {
+ if strings.TrimSpace(bucketName) == "" && !obsClient.conf.cname {
+ return errors.New("Bucket is empty")
+ }
+ return obsClient.doAction(action, method, bucketName, "", input, output, false, true, extensions)
+}
+
+func (obsClient ObsClient) doActionWithBucket(action, method, bucketName string, input ISerializable, output IBaseModel, extensions []extensionOptions) error {
+ if strings.TrimSpace(bucketName) == "" && !obsClient.conf.cname {
+ return errors.New("Bucket is empty")
+ }
+ return obsClient.doAction(action, method, bucketName, "", input, output, true, true, extensions)
+}
+
+func (obsClient ObsClient) doActionWithBucketAndKey(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, extensions []extensionOptions) error {
+ return obsClient._doActionWithBucketAndKey(action, method, bucketName, objectKey, input, output, true, extensions)
+}
+
+func (obsClient ObsClient) doActionWithBucketAndKeyV2(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, extensions []extensionOptions) error {
+ if strings.TrimSpace(bucketName) == "" && !obsClient.conf.cname {
+ return errors.New("Bucket is empty")
+ }
+ if strings.TrimSpace(objectKey) == "" {
+ return errors.New("Key is empty")
+ }
+ return obsClient.doAction(action, method, bucketName, objectKey, input, output, false, true, extensions)
+}
+
+func (obsClient ObsClient) doActionWithBucketAndKeyUnRepeatable(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, extensions []extensionOptions) error {
+ return obsClient._doActionWithBucketAndKey(action, method, bucketName, objectKey, input, output, false, extensions)
+}
+
+func (obsClient ObsClient) _doActionWithBucketAndKey(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, repeatable bool, extensions []extensionOptions) error {
+ if strings.TrimSpace(bucketName) == "" && !obsClient.conf.cname {
+ return errors.New("Bucket is empty")
+ }
+ if strings.TrimSpace(objectKey) == "" {
+ return errors.New("Key is empty")
+ }
+ return obsClient.doAction(action, method, bucketName, objectKey, input, output, true, repeatable, extensions)
+}
+
+func (obsClient ObsClient) doAction(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, xmlResult bool, repeatable bool, extensions []extensionOptions) error {
+
+ var resp *http.Response
+ var respError error
+ doLog(LEVEL_INFO, "Enter method %s...", action)
+ start := GetCurrentTimestamp()
+
+ params, headers, data, err := input.trans(obsClient.conf.signature == SignatureObs)
+ if err != nil {
+ return err
+ }
+
+ if params == nil {
+ params = make(map[string]string)
+ }
+
+ if headers == nil {
+ headers = make(map[string][]string)
+ }
+
+ for _, extension := range extensions {
+ if extensionHeader, ok := extension.(extensionHeaders); ok {
+ _err := extensionHeader(headers, obsClient.conf.signature == SignatureObs)
+ if _err != nil {
+ doLog(LEVEL_WARN, fmt.Sprintf("set header with error: %v", _err))
+ }
+ } else {
+ doLog(LEVEL_WARN, "Unsupported extensionOptions")
+ }
+ }
+
+ switch method {
+ case HTTP_GET:
+ resp, respError = obsClient.doHTTPGet(bucketName, objectKey, params, headers, data, repeatable)
+ case HTTP_POST:
+ resp, respError = obsClient.doHTTPPost(bucketName, objectKey, params, headers, data, repeatable)
+ case HTTP_PUT:
+ resp, respError = obsClient.doHTTPPut(bucketName, objectKey, params, headers, data, repeatable)
+ case HTTP_DELETE:
+ resp, respError = obsClient.doHTTPDelete(bucketName, objectKey, params, headers, data, repeatable)
+ case HTTP_HEAD:
+ resp, respError = obsClient.doHTTPHead(bucketName, objectKey, params, headers, data, repeatable)
+ case HTTP_OPTIONS:
+ resp, respError = obsClient.doHTTPOptions(bucketName, objectKey, params, headers, data, repeatable)
+ default:
+ respError = errors.New("Unexpect http method error")
+ }
+ if respError == nil && output != nil {
+ respError = ParseResponseToBaseModel(resp, output, xmlResult, obsClient.conf.signature == SignatureObs)
+ if respError != nil {
+ doLog(LEVEL_WARN, "Parse response to BaseModel with error: %v", respError)
+ }
+ } else {
+ doLog(LEVEL_WARN, "Do http request with error: %v", respError)
+ }
+
+ if isDebugLogEnabled() {
+ doLog(LEVEL_DEBUG, "End method %s, obsclient cost %d ms", action, (GetCurrentTimestamp() - start))
+ }
+
+ return respError
+}
+
+func (obsClient ObsClient) doHTTPGet(bucketName, objectKey string, params map[string]string,
+ headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
+ return obsClient.doHTTP(HTTP_GET, bucketName, objectKey, params, prepareHeaders(headers, false, obsClient.conf.signature == SignatureObs), data, repeatable)
+}
+
+func (obsClient ObsClient) doHTTPHead(bucketName, objectKey string, params map[string]string,
+ headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
+ return obsClient.doHTTP(HTTP_HEAD, bucketName, objectKey, params, prepareHeaders(headers, false, obsClient.conf.signature == SignatureObs), data, repeatable)
+}
+
+func (obsClient ObsClient) doHTTPOptions(bucketName, objectKey string, params map[string]string,
+ headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
+ return obsClient.doHTTP(HTTP_OPTIONS, bucketName, objectKey, params, prepareHeaders(headers, false, obsClient.conf.signature == SignatureObs), data, repeatable)
+}
+
+func (obsClient ObsClient) doHTTPDelete(bucketName, objectKey string, params map[string]string,
+ headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
+ return obsClient.doHTTP(HTTP_DELETE, bucketName, objectKey, params, prepareHeaders(headers, false, obsClient.conf.signature == SignatureObs), data, repeatable)
+}
+
+func (obsClient ObsClient) doHTTPPut(bucketName, objectKey string, params map[string]string,
+ headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
+ return obsClient.doHTTP(HTTP_PUT, bucketName, objectKey, params, prepareHeaders(headers, true, obsClient.conf.signature == SignatureObs), data, repeatable)
+}
+
+func (obsClient ObsClient) doHTTPPost(bucketName, objectKey string, params map[string]string,
+ headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
+ return obsClient.doHTTP(HTTP_POST, bucketName, objectKey, params, prepareHeaders(headers, true, obsClient.conf.signature == SignatureObs), data, repeatable)
+}
+
+func (obsClient ObsClient) doHTTPWithSignedURL(action, method string, signedURL string, actualSignedRequestHeaders http.Header, data io.Reader, output IBaseModel, xmlResult bool) (respError error) {
+ req, err := http.NewRequest(method, signedURL, data)
+ if err != nil {
+ return err
+ }
+ if obsClient.conf.ctx != nil {
+ req = req.WithContext(obsClient.conf.ctx)
+ }
+ var resp *http.Response
+
+ var isSecurityToken bool
+ var securityToken string
+ var query []string
+ parmas := strings.Split(signedURL, "?")
+ if len(parmas) > 1 {
+ query = strings.Split(parmas[1], "&")
+ for _, value := range query {
+ if strings.HasPrefix(value, HEADER_STS_TOKEN_AMZ+"=") || strings.HasPrefix(value, HEADER_STS_TOKEN_OBS+"=") {
+ if value[len(HEADER_STS_TOKEN_AMZ)+1:] != "" {
+ securityToken = value[len(HEADER_STS_TOKEN_AMZ)+1:]
+ isSecurityToken = true
+ }
+ }
+ }
+ }
+ logSignedURL := signedURL
+ if isSecurityToken {
+ logSignedURL = strings.Replace(logSignedURL, securityToken, "******", -1)
+ }
+ doLog(LEVEL_INFO, "Do %s with signedUrl %s...", action, logSignedURL)
+
+ req.Header = actualSignedRequestHeaders
+ if value, ok := req.Header[HEADER_HOST_CAMEL]; ok {
+ req.Host = value[0]
+ delete(req.Header, HEADER_HOST_CAMEL)
+ } else if value, ok := req.Header[HEADER_HOST]; ok {
+ req.Host = value[0]
+ delete(req.Header, HEADER_HOST)
+ }
+
+ if value, ok := req.Header[HEADER_CONTENT_LENGTH_CAMEL]; ok {
+ req.ContentLength = StringToInt64(value[0], -1)
+ delete(req.Header, HEADER_CONTENT_LENGTH_CAMEL)
+ } else if value, ok := req.Header[HEADER_CONTENT_LENGTH]; ok {
+ req.ContentLength = StringToInt64(value[0], -1)
+ delete(req.Header, HEADER_CONTENT_LENGTH)
+ }
+
+ req.Header[HEADER_USER_AGENT_CAMEL] = []string{USER_AGENT}
+ start := GetCurrentTimestamp()
+ resp, err = obsClient.httpClient.Do(req)
+ if isInfoLogEnabled() {
+ doLog(LEVEL_INFO, "Do http request cost %d ms", (GetCurrentTimestamp() - start))
+ }
+
+ var msg interface{}
+ if err != nil {
+ respError = err
+ resp = nil
+ } else {
+ doLog(LEVEL_DEBUG, "Response headers: %v", resp.Header)
+ if resp.StatusCode >= 300 {
+ respError = ParseResponseToObsError(resp, obsClient.conf.signature == SignatureObs)
+ msg = resp.Status
+ resp = nil
+ } else {
+ if output != nil {
+ respError = ParseResponseToBaseModel(resp, output, xmlResult, obsClient.conf.signature == SignatureObs)
+ }
+ if respError != nil {
+ doLog(LEVEL_WARN, "Parse response to BaseModel with error: %v", respError)
+ }
+ }
+ }
+
+ if msg != nil {
+ doLog(LEVEL_ERROR, "Failed to send request with reason:%v", msg)
+ }
+
+ if isDebugLogEnabled() {
+ doLog(LEVEL_DEBUG, "End method %s, obsclient cost %d ms", action, (GetCurrentTimestamp() - start))
+ }
+
+ return
+}
+
+func (obsClient ObsClient) doHTTP(method, bucketName, objectKey string, params map[string]string,
+ headers map[string][]string, data interface{}, repeatable bool) (resp *http.Response, respError error) {
+
+ bucketName = strings.TrimSpace(bucketName)
+
+ method = strings.ToUpper(method)
+
+ var redirectURL string
+ var requestURL string
+ maxRetryCount := obsClient.conf.maxRetryCount
+ maxRedirectCount := obsClient.conf.maxRedirectCount
+
+ var _data io.Reader
+ if data != nil {
+ if dataStr, ok := data.(string); ok {
+ doLog(LEVEL_DEBUG, "Do http request with string: %s", dataStr)
+ headers["Content-Length"] = []string{IntToString(len(dataStr))}
+ _data = strings.NewReader(dataStr)
+ } else if dataByte, ok := data.([]byte); ok {
+ doLog(LEVEL_DEBUG, "Do http request with byte array")
+ headers["Content-Length"] = []string{IntToString(len(dataByte))}
+ _data = bytes.NewReader(dataByte)
+ } else if dataReader, ok := data.(io.Reader); ok {
+ _data = dataReader
+ } else {
+ doLog(LEVEL_WARN, "Data is not a valid io.Reader")
+ return nil, errors.New("Data is not a valid io.Reader")
+ }
+ }
+
+ var lastRequest *http.Request
+ redirectFlag := false
+ for i, redirectCount := 0, 0; i <= maxRetryCount; i++ {
+ if redirectURL != "" {
+ if !redirectFlag {
+ parsedRedirectURL, err := url.Parse(redirectURL)
+ if err != nil {
+ return nil, err
+ }
+ requestURL, err = obsClient.doAuth(method, bucketName, objectKey, params, headers, parsedRedirectURL.Host)
+ if err != nil {
+ return nil, err
+ }
+ if parsedRequestURL, err := url.Parse(requestURL); err != nil {
+ return nil, err
+ } else if parsedRequestURL.RawQuery != "" && parsedRedirectURL.RawQuery == "" {
+ redirectURL += "?" + parsedRequestURL.RawQuery
+ }
+ }
+ requestURL = redirectURL
+ } else {
+ var err error
+ requestURL, err = obsClient.doAuth(method, bucketName, objectKey, params, headers, "")
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ req, err := http.NewRequest(method, requestURL, _data)
+ if err != nil {
+ return nil, err
+ }
+ if obsClient.conf.ctx != nil {
+ req = req.WithContext(obsClient.conf.ctx)
+ }
+ doLog(LEVEL_DEBUG, "Do request with url [%s] and method [%s]", requestURL, method)
+
+ if isDebugLogEnabled() {
+ auth := headers[HEADER_AUTH_CAMEL]
+ delete(headers, HEADER_AUTH_CAMEL)
+
+ var isSecurityToken bool
+ var securityToken []string
+ if securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_AMZ]; isSecurityToken {
+ headers[HEADER_STS_TOKEN_AMZ] = []string{"******"}
+ } else if securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_OBS]; isSecurityToken {
+ headers[HEADER_STS_TOKEN_OBS] = []string{"******"}
+ }
+ doLog(LEVEL_DEBUG, "Request headers: %v", headers)
+ headers[HEADER_AUTH_CAMEL] = auth
+ if isSecurityToken {
+ if obsClient.conf.signature == SignatureObs {
+ headers[HEADER_STS_TOKEN_OBS] = securityToken
+ } else {
+ headers[HEADER_STS_TOKEN_AMZ] = securityToken
+ }
+ }
+ }
+
+ for key, value := range headers {
+ if key == HEADER_HOST_CAMEL {
+ req.Host = value[0]
+ delete(headers, key)
+ } else if key == HEADER_CONTENT_LENGTH_CAMEL {
+ req.ContentLength = StringToInt64(value[0], -1)
+ delete(headers, key)
+ } else {
+ req.Header[key] = value
+ }
+ }
+
+ req.Header[HEADER_USER_AGENT_CAMEL] = []string{USER_AGENT}
+
+ // Host and Content-Length were removed from the headers map on the first
+ // attempt, so carry them over from the previous request on retries.
+ if lastRequest != nil {
+ req.Host = lastRequest.Host
+ req.ContentLength = lastRequest.ContentLength
+ }
+ lastRequest = req
+
+ start := GetCurrentTimestamp()
+ resp, err = obsClient.httpClient.Do(req)
+ if isInfoLogEnabled() {
+ doLog(LEVEL_INFO, "Do http request cost %d ms", (GetCurrentTimestamp() - start))
+ }
+
+ var msg interface{}
+ if err != nil {
+ msg = err
+ respError = err
+ resp = nil
+ if !repeatable {
+ break
+ }
+ } else {
+ doLog(LEVEL_DEBUG, "Response headers: %v", resp.Header)
+ if resp.StatusCode < 300 {
+ break
+ } else if !repeatable || (resp.StatusCode >= 400 && resp.StatusCode < 500) || resp.StatusCode == 304 {
+ respError = ParseResponseToObsError(resp, obsClient.conf.signature == SignatureObs)
+ resp = nil
+ break
+ } else if resp.StatusCode >= 300 && resp.StatusCode < 400 {
+ if location := resp.Header.Get(HEADER_LOCATION_CAMEL); location != "" && redirectCount < maxRedirectCount {
+ redirectURL = location
+ doLog(LEVEL_WARN, "Redirect request to %s", redirectURL)
+ msg = resp.Status
+ maxRetryCount++
+ redirectCount++
+ if resp.StatusCode == 302 && method == HTTP_GET {
+ redirectFlag = true
+ } else {
+ redirectFlag = false
+ }
+ } else {
+ respError = ParseResponseToObsError(resp, obsClient.conf.signature == SignatureObs)
+ resp = nil
+ break
+ }
+ } else {
+ msg = resp.Status
+ }
+ }
+ if i != maxRetryCount {
+ if resp != nil {
+ _err := resp.Body.Close()
+ if _err != nil {
+ doLog(LEVEL_WARN, "Failed to close resp body")
+ }
+ resp = nil
+ }
+ if _, ok := headers[HEADER_AUTH_CAMEL]; ok {
+ delete(headers, HEADER_AUTH_CAMEL)
+ }
+ doLog(LEVEL_WARN, "Failed to send request with reason:%v, will try again", msg)
+ if r, ok := _data.(*strings.Reader); ok {
+ _, err := r.Seek(0, 0)
+ if err != nil {
+ return nil, err
+ }
+ } else if r, ok := _data.(*bytes.Reader); ok {
+ _, err := r.Seek(0, 0)
+ if err != nil {
+ return nil, err
+ }
+ } else if r, ok := _data.(*fileReaderWrapper); ok {
+ fd, err := os.Open(r.filePath)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ errMsg := fd.Close()
+ if errMsg != nil {
+ doLog(LEVEL_WARN, "Failed to close with reason: %v", errMsg)
+ }
+ }()
+ fileReaderWrapper := &fileReaderWrapper{filePath: r.filePath}
+ fileReaderWrapper.mark = r.mark
+ fileReaderWrapper.reader = fd
+ fileReaderWrapper.totalCount = r.totalCount
+ _data = fileReaderWrapper
+ _, err = fd.Seek(r.mark, 0)
+ if err != nil {
+ return nil, err
+ }
+ } else if r, ok := _data.(*readerWrapper); ok {
+ _, err := r.seek(0, 0)
+ if err != nil {
+ return nil, err
+ }
+ }
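+ // Jittered backoff: sleep a random interval whose upper bound grows with
+ // the attempt number before retrying.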
+ time.Sleep(time.Duration(float64(i+2) * rand.Float64() * float64(time.Second)))
+ } else {
+ doLog(LEVEL_ERROR, "Failed to send request with reason:%v", msg)
+ if resp != nil {
+ respError = ParseResponseToObsError(resp, obsClient.conf.signature == SignatureObs)
+ resp = nil
+ }
+ }
+ }
+ return
+}
+
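+// connDelegate wraps a net.Conn and refreshes deadlines around each I/O call:
+// socketTimeout bounds every single Read/Write, while finalTimeout limits how
+// long the connection may sit idle afterwards.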
+type connDelegate struct {
+ conn net.Conn
+ socketTimeout time.Duration
+ finalTimeout time.Duration
+}
+
+func getConnDelegate(conn net.Conn, socketTimeout int, finalTimeout int) *connDelegate {
+ return &connDelegate{
+ conn: conn,
+ socketTimeout: time.Second * time.Duration(socketTimeout),
+ finalTimeout: time.Second * time.Duration(finalTimeout),
+ }
+}
+
+func (delegate *connDelegate) Read(b []byte) (n int, err error) {
+ setReadDeadlineErr := delegate.SetReadDeadline(time.Now().Add(delegate.socketTimeout))
+ flag := isDebugLogEnabled()
+
+ if setReadDeadlineErr != nil && flag {
+ doLog(LEVEL_DEBUG, "Failed to set read deadline with reason: %v, but it's ok", setReadDeadlineErr)
+ }
+
+ n, err = delegate.conn.Read(b)
+ setReadDeadlineErr = delegate.SetReadDeadline(time.Now().Add(delegate.finalTimeout))
+ if setReadDeadlineErr != nil && flag {
+ doLog(LEVEL_DEBUG, "Failed to set read deadline with reason: %v, but it's ok", setReadDeadlineErr)
+ }
+ return n, err
+}
+
+func (delegate *connDelegate) Write(b []byte) (n int, err error) {
+ setWriteDeadlineErr := delegate.SetWriteDeadline(time.Now().Add(delegate.socketTimeout))
+ flag := isDebugLogEnabled()
+ if setWriteDeadlineErr != nil && flag {
+ doLog(LEVEL_DEBUG, "Failed to set write deadline with reason: %v, but it's ok", setWriteDeadlineErr)
+ }
+
+ n, err = delegate.conn.Write(b)
+ finalTimeout := time.Now().Add(delegate.finalTimeout)
+ setWriteDeadlineErr = delegate.SetWriteDeadline(finalTimeout)
+ if setWriteDeadlineErr != nil && flag {
+ doLog(LEVEL_DEBUG, "Failed to set write deadline with reason: %v, but it's ok", setWriteDeadlineErr)
+ }
+ setReadDeadlineErr := delegate.SetReadDeadline(finalTimeout)
+ if setReadDeadlineErr != nil && flag {
+ doLog(LEVEL_DEBUG, "Failed to set read deadline with reason: %v, but it's ok", setReadDeadlineErr)
+ }
+ return n, err
+}
+
+func (delegate *connDelegate) Close() error {
+ return delegate.conn.Close()
+}
+
+func (delegate *connDelegate) LocalAddr() net.Addr {
+ return delegate.conn.LocalAddr()
+}
+
+func (delegate *connDelegate) RemoteAddr() net.Addr {
+ return delegate.conn.RemoteAddr()
+}
+
+func (delegate *connDelegate) SetDeadline(t time.Time) error {
+ return delegate.conn.SetDeadline(t)
+}
+
+func (delegate *connDelegate) SetReadDeadline(t time.Time) error {
+ return delegate.conn.SetReadDeadline(t)
+}
+
+func (delegate *connDelegate) SetWriteDeadline(t time.Time) error {
+ return delegate.conn.SetWriteDeadline(t)
+}
diff --git a/modules/obs/log.go b/modules/obs/log.go
new file mode 100755
index 000000000..8938e5e40
--- /dev/null
+++ b/modules/obs/log.go
@@ -0,0 +1,317 @@
+// Copyright 2019 Huawei Technologies Co.,Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+//nolint:golint, unused
+package obs
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "sync"
+)
+
+// Level defines the level of the log
+type Level int
+
+const (
+ LEVEL_OFF Level = 500
+ LEVEL_ERROR Level = 400
+ LEVEL_WARN Level = 300
+ LEVEL_INFO Level = 200
+ LEVEL_DEBUG Level = 100
+)
+
+var logLevelMap = map[Level]string{
+ LEVEL_OFF: "[OFF]: ",
+ LEVEL_ERROR: "[ERROR]: ",
+ LEVEL_WARN: "[WARN]: ",
+ LEVEL_INFO: "[INFO]: ",
+ LEVEL_DEBUG: "[DEBUG]: ",
+}
+
+type logConfType struct {
+ level Level
+ logToConsole bool
+ logFullPath string
+ maxLogSize int64
+ backups int
+}
+
+func getDefaultLogConf() logConfType {
+ return logConfType{
+ level: LEVEL_WARN,
+ logToConsole: false,
+ logFullPath: "",
+ maxLogSize: 1024 * 1024 * 30, //30MB
+ backups: 10,
+ }
+}
+
+var logConf logConfType
+
+type loggerWrapper struct {
+ fullPath string
+ fd *os.File
+ ch chan string
+ wg sync.WaitGroup
+ queue []string
+ logger *log.Logger
+ index int
+ cacheCount int
+ closed bool
+}
+
+func (lw *loggerWrapper) doInit() {
+ lw.queue = make([]string, 0, lw.cacheCount)
+ lw.logger = log.New(lw.fd, "", 0)
+ lw.ch = make(chan string, lw.cacheCount)
+ lw.wg.Add(1)
+ go lw.doWrite()
+}
+
+func (lw *loggerWrapper) rotate() {
+ stat, err := lw.fd.Stat()
+ if err != nil {
+ _err := lw.fd.Close()
+ if _err != nil {
+ doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err)
+ }
+ panic(err)
+ }
+ if stat.Size() >= logConf.maxLogSize {
+ _err := lw.fd.Sync()
+ if _err != nil {
+ panic(_err)
+ }
+ _err = lw.fd.Close()
+ if _err != nil {
+ doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err)
+ }
+ if lw.index > logConf.backups {
+ lw.index = 1
+ }
+ _err = os.Rename(lw.fullPath, lw.fullPath+"."+IntToString(lw.index))
+ if _err != nil {
+ panic(_err)
+ }
+ lw.index++
+
+ fd, err := os.OpenFile(lw.fullPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
+ if err != nil {
+ panic(err)
+ }
+ lw.fd = fd
+ lw.logger.SetOutput(lw.fd)
+ }
+}
+
+func (lw *loggerWrapper) doFlush() {
+ lw.rotate()
+ for _, m := range lw.queue {
+ lw.logger.Println(m)
+ }
+ err := lw.fd.Sync()
+ if err != nil {
+ panic(err)
+ }
+}
+
+func (lw *loggerWrapper) doClose() {
+ lw.closed = true
+ close(lw.ch)
+ lw.wg.Wait()
+}
+
+func (lw *loggerWrapper) doWrite() {
+ defer lw.wg.Done()
+ for {
+ msg, ok := <-lw.ch
+ if !ok {
+ lw.doFlush()
+ _err := lw.fd.Close()
+ if _err != nil {
+ doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err)
+ }
+ break
+ }
+ if len(lw.queue) >= lw.cacheCount {
+ lw.doFlush()
+ lw.queue = make([]string, 0, lw.cacheCount)
+ }
+ lw.queue = append(lw.queue, msg)
+ }
+}
+
+func (lw *loggerWrapper) Printf(format string, v ...interface{}) {
+ if !lw.closed {
+ msg := fmt.Sprintf(format, v...)
+ lw.ch <- msg
+ }
+}
+
+var consoleLogger *log.Logger
+var fileLogger *loggerWrapper
+var lock = new(sync.RWMutex)
+
+func isDebugLogEnabled() bool {
+ return logConf.level <= LEVEL_DEBUG
+}
+
+func isErrorLogEnabled() bool {
+ return logConf.level <= LEVEL_ERROR
+}
+
+func isWarnLogEnabled() bool {
+ return logConf.level <= LEVEL_WARN
+}
+
+func isInfoLogEnabled() bool {
+ return logConf.level <= LEVEL_INFO
+}
+
+func reset() {
+ if fileLogger != nil {
+ fileLogger.doClose()
+ fileLogger = nil
+ }
+ consoleLogger = nil
+ logConf = getDefaultLogConf()
+}
+
+// InitLog enable logging function with default cacheCnt
+func InitLog(logFullPath string, maxLogSize int64, backups int, level Level, logToConsole bool) error {
+ return InitLogWithCacheCnt(logFullPath, maxLogSize, backups, level, logToConsole, 50)
+}
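+
+// A minimal usage sketch (illustrative): log WARN and above to a rotating file
+// and to stdout, keeping at most 10 backups of 30 MB each:
+//
+//	if err := InitLog("/var/log/obs-sdk.log", 1024*1024*30, 10, LEVEL_WARN, true); err != nil {
+//	    // handle the error
+//	}
+//	defer CloseLog()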
+
+// InitLogWithCacheCnt enable logging function
+func InitLogWithCacheCnt(logFullPath string, maxLogSize int64, backups int, level Level, logToConsole bool, cacheCnt int) error {
+ lock.Lock()
+ defer lock.Unlock()
+ if cacheCnt <= 0 {
+ cacheCnt = 50
+ }
+ reset()
+ if fullPath := strings.TrimSpace(logFullPath); fullPath != "" {
+ _fullPath, err := filepath.Abs(fullPath)
+ if err != nil {
+ return err
+ }
+
+ if !strings.HasSuffix(_fullPath, ".log") {
+ _fullPath += ".log"
+ }
+
+ stat, err := os.Stat(_fullPath)
+ if err == nil && stat.IsDir() {
+ return fmt.Errorf("logFullPath:[%s] is a directory", _fullPath)
+ } else if err = os.MkdirAll(filepath.Dir(_fullPath), os.ModePerm); err != nil {
+ return err
+ }
+
+ fd, err := os.OpenFile(_fullPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
+ if err != nil {
+ return err
+ }
+
+ if stat == nil {
+ stat, err = os.Stat(_fullPath)
+ if err != nil {
+ _err := fd.Close()
+ if _err != nil {
+ doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err)
+ }
+ return err
+ }
+ }
+
+ prefix := stat.Name() + "."
+ index := 1
+ var timeIndex int64
+ walkFunc := func(path string, info os.FileInfo, err error) error {
+ if err == nil {
+ if name := info.Name(); strings.HasPrefix(name, prefix) {
+ if i := StringToInt(name[len(prefix):], 0); i >= index && info.ModTime().Unix() >= timeIndex {
+ timeIndex = info.ModTime().Unix()
+ index = i + 1
+ }
+ }
+ }
+ return err
+ }
+
+ if err = filepath.Walk(filepath.Dir(_fullPath), walkFunc); err != nil {
+ _err := fd.Close()
+ if _err != nil {
+ doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err)
+ }
+ return err
+ }
+
+ fileLogger = &loggerWrapper{fullPath: _fullPath, fd: fd, index: index, cacheCount: cacheCnt, closed: false}
+ fileLogger.doInit()
+ }
+ if maxLogSize > 0 {
+ logConf.maxLogSize = maxLogSize
+ }
+ if backups > 0 {
+ logConf.backups = backups
+ }
+ logConf.level = level
+ if logToConsole {
+ consoleLogger = log.New(os.Stdout, "", log.LstdFlags)
+ }
+ return nil
+}
+
+// CloseLog disable logging and synchronize cache data to log files
+func CloseLog() {
+ if logEnabled() {
+ lock.Lock()
+ defer lock.Unlock()
+ reset()
+ }
+}
+
+func logEnabled() bool {
+ return consoleLogger != nil || fileLogger != nil
+}
+
+// DoLog writes log messages to the logger
+func DoLog(level Level, format string, v ...interface{}) {
+ doLog(level, format, v...)
+}
+
+func doLog(level Level, format string, v ...interface{}) {
+ if logEnabled() && logConf.level <= level {
+ msg := fmt.Sprintf(format, v...)
+ if _, file, line, ok := runtime.Caller(1); ok {
+ index := strings.LastIndex(file, "/")
+ if index >= 0 {
+ file = file[index+1:]
+ }
+ msg = fmt.Sprintf("%s:%d|%s", file, line, msg)
+ }
+ prefix := logLevelMap[level]
+ if consoleLogger != nil {
+ consoleLogger.Printf("%s%s", prefix, msg)
+ }
+ if fileLogger != nil {
+ nowDate := FormatUtcNow("2006-01-02T15:04:05Z")
+ fileLogger.Printf("%s %s%s", nowDate, prefix, msg)
+ }
+ }
+}
diff --git a/modules/obs/model.go b/modules/obs/model.go
new file mode 100755
index 000000000..8752b5198
--- /dev/null
+++ b/modules/obs/model.go
@@ -0,0 +1,1236 @@
+// Copyright 2019 Huawei Technologies Co.,Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+//nolint:golint, unused
+package obs
+
+import (
+ "encoding/xml"
+ "io"
+ "net/http"
+ "time"
+)
+
+// BaseModel defines base model response from OBS
+type BaseModel struct {
+ StatusCode int `xml:"-"`
+ RequestId string `xml:"RequestId" json:"request_id"`
+ ResponseHeaders map[string][]string `xml:"-"`
+}
+
+// Bucket defines bucket properties
+type Bucket struct {
+ XMLName xml.Name `xml:"Bucket"`
+ Name string `xml:"Name"`
+ CreationDate time.Time `xml:"CreationDate"`
+ Location string `xml:"Location"`
+}
+
+// Owner defines owner properties
+type Owner struct {
+ XMLName xml.Name `xml:"Owner"`
+ ID string `xml:"ID"`
+ DisplayName string `xml:"DisplayName,omitempty"`
+}
+
+// Initiator defines initiator properties
+type Initiator struct {
+ XMLName xml.Name `xml:"Initiator"`
+ ID string `xml:"ID"`
+ DisplayName string `xml:"DisplayName,omitempty"`
+}
+
+// ListBucketsInput is the input parameter of ListBuckets function
+type ListBucketsInput struct {
+ QueryLocation bool
+}
+
+// ListBucketsOutput is the result of ListBuckets function
+type ListBucketsOutput struct {
+ BaseModel
+ XMLName xml.Name `xml:"ListAllMyBucketsResult"`
+ Owner Owner `xml:"Owner"`
+ Buckets []Bucket `xml:"Buckets>Bucket"`
+}
+
+type bucketLocationObs struct {
+ XMLName xml.Name `xml:"Location"`
+ Location string `xml:",chardata"`
+}
+
+// BucketLocation defines bucket location configuration
+type BucketLocation struct {
+ XMLName xml.Name `xml:"CreateBucketConfiguration"`
+ Location string `xml:"LocationConstraint,omitempty"`
+}
+
+// CreateBucketInput is the input parameter of CreateBucket function
+type CreateBucketInput struct {
+ BucketLocation
+ Bucket string `xml:"-"`
+ ACL AclType `xml:"-"`
+ StorageClass StorageClassType `xml:"-"`
+ GrantReadId string `xml:"-"`
+ GrantWriteId string `xml:"-"`
+ GrantReadAcpId string `xml:"-"`
+ GrantWriteAcpId string `xml:"-"`
+ GrantFullControlId string `xml:"-"`
+ GrantReadDeliveredId string `xml:"-"`
+ GrantFullControlDeliveredId string `xml:"-"`
+ Epid string `xml:"-"`
+ AvailableZone string `xml:"-"`
+}
+
+// BucketStoragePolicy defines the bucket storage class
+type BucketStoragePolicy struct {
+ XMLName xml.Name `xml:"StoragePolicy"`
+ StorageClass StorageClassType `xml:"DefaultStorageClass"`
+}
+
+// SetBucketStoragePolicyInput is the input parameter of SetBucketStoragePolicy function
+type SetBucketStoragePolicyInput struct {
+ Bucket string `xml:"-"`
+ BucketStoragePolicy
+}
+
+type getBucketStoragePolicyOutputS3 struct {
+ BaseModel
+ BucketStoragePolicy
+}
+
+// GetBucketStoragePolicyOutput is the result of GetBucketStoragePolicy function
+type GetBucketStoragePolicyOutput struct {
+ BaseModel
+ StorageClass string
+}
+
+type bucketStoragePolicyObs struct {
+ XMLName xml.Name `xml:"StorageClass"`
+ StorageClass string `xml:",chardata"`
+}
+type getBucketStoragePolicyOutputObs struct {
+ BaseModel
+ bucketStoragePolicyObs
+}
+
+// ListObjsInput defines parameters for listing objects
+type ListObjsInput struct {
+ Prefix string
+ MaxKeys int
+ Delimiter string
+ Origin string
+ RequestHeader string
+}
+
+// ListObjectsInput is the input parameter of ListObjects function
+type ListObjectsInput struct {
+ ListObjsInput
+ Bucket string
+ Marker string
+}
+
+// Content defines the object content properties
+type Content struct {
+ XMLName xml.Name `xml:"Contents"`
+ Owner Owner `xml:"Owner"`
+ ETag string `xml:"ETag"`
+ Key string `xml:"Key"`
+ LastModified time.Time `xml:"LastModified"`
+ Size int64 `xml:"Size"`
+ StorageClass StorageClassType `xml:"StorageClass"`
+}
+
+// ListObjectsOutput is the result of ListObjects function
+type ListObjectsOutput struct {
+ BaseModel
+ XMLName xml.Name `xml:"ListBucketResult"`
+ Delimiter string `xml:"Delimiter"`
+ IsTruncated bool `xml:"IsTruncated"`
+ Marker string `xml:"Marker"`
+ NextMarker string `xml:"NextMarker"`
+ MaxKeys int `xml:"MaxKeys"`
+ Name string `xml:"Name"`
+ Prefix string `xml:"Prefix"`
+ Contents []Content `xml:"Contents"`
+ CommonPrefixes []string `xml:"CommonPrefixes>Prefix"`
+ Location string `xml:"-"`
+}
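+
+// A pagination sketch for ListObjects (illustrative only; client stands in
+// for an assumed *ObsClient):
+//
+//    input := &ListObjectsInput{Bucket: "my-bucket"}
+//    for {
+//        output, err := client.ListObjects(input)
+//        if err != nil {
+//            break
+//        }
+//        // consume output.Contents ...
+//        if !output.IsTruncated {
+//            break
+//        }
+//        input.Marker = output.NextMarker
+//    }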
+
+// ListVersionsInput is the input parameter of ListVersions function
+type ListVersionsInput struct {
+ ListObjsInput
+ Bucket string
+ KeyMarker string
+ VersionIdMarker string
+}
+
+// Version defines the properties of versioning objects
+type Version struct {
+ DeleteMarker
+ XMLName xml.Name `xml:"Version"`
+ ETag string `xml:"ETag"`
+ Size int64 `xml:"Size"`
+}
+
+// DeleteMarker defines the properties of versioning delete markers
+type DeleteMarker struct {
+ XMLName xml.Name `xml:"DeleteMarker"`
+ Key string `xml:"Key"`
+ VersionId string `xml:"VersionId"`
+ IsLatest bool `xml:"IsLatest"`
+ LastModified time.Time `xml:"LastModified"`
+ Owner Owner `xml:"Owner"`
+ StorageClass StorageClassType `xml:"StorageClass"`
+}
+
+// ListVersionsOutput is the result of ListVersions function
+type ListVersionsOutput struct {
+ BaseModel
+ XMLName xml.Name `xml:"ListVersionsResult"`
+ Delimiter string `xml:"Delimiter"`
+ IsTruncated bool `xml:"IsTruncated"`
+ KeyMarker string `xml:"KeyMarker"`
+ NextKeyMarker string `xml:"NextKeyMarker"`
+ VersionIdMarker string `xml:"VersionIdMarker"`
+ NextVersionIdMarker string `xml:"NextVersionIdMarker"`
+ MaxKeys int `xml:"MaxKeys"`
+ Name string `xml:"Name"`
+ Prefix string `xml:"Prefix"`
+ Versions []Version `xml:"Version"`
+ DeleteMarkers []DeleteMarker `xml:"DeleteMarker"`
+ CommonPrefixes []string `xml:"CommonPrefixes>Prefix"`
+ Location string `xml:"-"`
+}
+
+// ListMultipartUploadsInput is the input parameter of ListMultipartUploads function
+type ListMultipartUploadsInput struct {
+ Bucket string
+ Prefix string
+ MaxUploads int
+ Delimiter string
+ KeyMarker string
+ UploadIdMarker string
+}
+
+// Upload defines multipart upload properties
+type Upload struct {
+ XMLName xml.Name `xml:"Upload"`
+ Key string `xml:"Key"`
+ UploadId string `xml:"UploadId"`
+ Initiated time.Time `xml:"Initiated"`
+ StorageClass StorageClassType `xml:"StorageClass"`
+ Owner Owner `xml:"Owner"`
+ Initiator Initiator `xml:"Initiator"`
+}
+
+// ListMultipartUploadsOutput is the result of ListMultipartUploads function
+type ListMultipartUploadsOutput struct {
+ BaseModel
+ XMLName xml.Name `xml:"ListMultipartUploadsResult"`
+ Bucket string `xml:"Bucket"`
+ KeyMarker string `xml:"KeyMarker"`
+ NextKeyMarker string `xml:"NextKeyMarker"`
+ UploadIdMarker string `xml:"UploadIdMarker"`
+ NextUploadIdMarker string `xml:"NextUploadIdMarker"`
+ Delimiter string `xml:"Delimiter"`
+ IsTruncated bool `xml:"IsTruncated"`
+ MaxUploads int `xml:"MaxUploads"`
+ Prefix string `xml:"Prefix"`
+ Uploads []Upload `xml:"Upload"`
+ CommonPrefixes []string `xml:"CommonPrefixes>Prefix"`
+}
+
+// BucketQuota defines bucket quota configuration
+type BucketQuota struct {
+ XMLName xml.Name `xml:"Quota"`
+ Quota int64 `xml:"StorageQuota"`
+}
+
+// SetBucketQuotaInput is the input parameter of SetBucketQuota function
+type SetBucketQuotaInput struct {
+ Bucket string `xml:"-"`
+ BucketQuota
+}
+
+// GetBucketQuotaOutput is the result of GetBucketQuota function
+type GetBucketQuotaOutput struct {
+ BaseModel
+ BucketQuota
+}
+
+// GetBucketStorageInfoOutput is the result of GetBucketStorageInfo function
+type GetBucketStorageInfoOutput struct {
+ BaseModel
+ XMLName xml.Name `xml:"GetBucketStorageInfoResult"`
+ Size int64 `xml:"Size"`
+ ObjectNumber int `xml:"ObjectNumber"`
+}
+
+type getBucketLocationOutputS3 struct {
+ BaseModel
+ BucketLocation
+}
+type getBucketLocationOutputObs struct {
+ BaseModel
+ bucketLocationObs
+}
+
+// GetBucketLocationOutput is the result of GetBucketLocation function
+type GetBucketLocationOutput struct {
+ BaseModel
+ Location string `xml:"-"`
+}
+
+// Grantee defines grantee properties
+type Grantee struct {
+ XMLName xml.Name `xml:"Grantee"`
+ Type GranteeType `xml:"type,attr"`
+ ID string `xml:"ID,omitempty"`
+ DisplayName string `xml:"DisplayName,omitempty"`
+ URI GroupUriType `xml:"URI,omitempty"`
+}
+
+type granteeObs struct {
+ XMLName xml.Name `xml:"Grantee"`
+ Type GranteeType `xml:"type,attr"`
+ ID string `xml:"ID,omitempty"`
+ DisplayName string `xml:"DisplayName,omitempty"`
+ Canned string `xml:"Canned,omitempty"`
+}
+
+// Grant defines grant properties
+type Grant struct {
+ XMLName xml.Name `xml:"Grant"`
+ Grantee Grantee `xml:"Grantee"`
+ Permission PermissionType `xml:"Permission"`
+ Delivered bool `xml:"Delivered"`
+}
+type grantObs struct {
+ XMLName xml.Name `xml:"Grant"`
+ Grantee granteeObs `xml:"Grantee"`
+ Permission PermissionType `xml:"Permission"`
+ Delivered bool `xml:"Delivered"`
+}
+
+// AccessControlPolicy defines access control policy properties
+type AccessControlPolicy struct {
+ XMLName xml.Name `xml:"AccessControlPolicy"`
+ Owner Owner `xml:"Owner"`
+ Grants []Grant `xml:"AccessControlList>Grant"`
+ Delivered string `xml:"Delivered,omitempty"`
+}
+
+type accessControlPolicyObs struct {
+ XMLName xml.Name `xml:"AccessControlPolicy"`
+ Owner Owner `xml:"Owner"`
+ Grants []grantObs `xml:"AccessControlList>Grant"`
+}
+
+// GetBucketAclOutput is the result of GetBucketAcl function
+type GetBucketAclOutput struct {
+ BaseModel
+ AccessControlPolicy
+}
+
+type getBucketACLOutputObs struct {
+ BaseModel
+ accessControlPolicyObs
+}
+
+// SetBucketAclInput is the input parameter of SetBucketAcl function
+type SetBucketAclInput struct {
+ Bucket string `xml:"-"`
+ ACL AclType `xml:"-"`
+ AccessControlPolicy
+}
+
+// SetBucketPolicyInput is the input parameter of SetBucketPolicy function
+type SetBucketPolicyInput struct {
+ Bucket string
+ Policy string
+}
+
+// GetBucketPolicyOutput is the result of GetBucketPolicy function
+type GetBucketPolicyOutput struct {
+ BaseModel
+ Policy string `json:"body"`
+}
+
+// CorsRule defines the CORS rules
+type CorsRule struct {
+ XMLName xml.Name `xml:"CORSRule"`
+ ID string `xml:"ID,omitempty"`
+ AllowedOrigin []string `xml:"AllowedOrigin"`
+ AllowedMethod []string `xml:"AllowedMethod"`
+ AllowedHeader []string `xml:"AllowedHeader,omitempty"`
+ MaxAgeSeconds int `xml:"MaxAgeSeconds"`
+ ExposeHeader []string `xml:"ExposeHeader,omitempty"`
+}
+
+// BucketCors defines the bucket CORS configuration
+type BucketCors struct {
+ XMLName xml.Name `xml:"CORSConfiguration"`
+ CorsRules []CorsRule `xml:"CORSRule"`
+}
+
+// SetBucketCorsInput is the input parameter of SetBucketCors function
+type SetBucketCorsInput struct {
+ Bucket string `xml:"-"`
+ BucketCors
+}
+
+// GetBucketCorsOutput is the result of GetBucketCors function
+type GetBucketCorsOutput struct {
+ BaseModel
+ BucketCors
+}
+
+// BucketVersioningConfiguration defines the versioning configuration
+type BucketVersioningConfiguration struct {
+ XMLName xml.Name `xml:"VersioningConfiguration"`
+ Status VersioningStatusType `xml:"Status"`
+}
+
+// SetBucketVersioningInput is the input parameter of SetBucketVersioning function
+type SetBucketVersioningInput struct {
+ Bucket string `xml:"-"`
+ BucketVersioningConfiguration
+}
+
+// GetBucketVersioningOutput is the result of GetBucketVersioning function
+type GetBucketVersioningOutput struct {
+ BaseModel
+ BucketVersioningConfiguration
+}
+
+// IndexDocument defines the default page configuration
+type IndexDocument struct {
+ Suffix string `xml:"Suffix"`
+}
+
+// ErrorDocument defines the error page configuration
+type ErrorDocument struct {
+ Key string `xml:"Key,omitempty"`
+}
+
+// Condition defines condition in RoutingRule
+type Condition struct {
+ XMLName xml.Name `xml:"Condition"`
+ KeyPrefixEquals string `xml:"KeyPrefixEquals,omitempty"`
+ HttpErrorCodeReturnedEquals string `xml:"HttpErrorCodeReturnedEquals,omitempty"`
+}
+
+// Redirect defines redirect in RoutingRule
+type Redirect struct {
+ XMLName xml.Name `xml:"Redirect"`
+ Protocol ProtocolType `xml:"Protocol,omitempty"`
+ HostName string `xml:"HostName,omitempty"`
+ ReplaceKeyPrefixWith string `xml:"ReplaceKeyPrefixWith,omitempty"`
+ ReplaceKeyWith string `xml:"ReplaceKeyWith,omitempty"`
+ HttpRedirectCode string `xml:"HttpRedirectCode,omitempty"`
+}
+
+// RoutingRule defines routing rules
+type RoutingRule struct {
+ XMLName xml.Name `xml:"RoutingRule"`
+ Condition Condition `xml:"Condition,omitempty"`
+ Redirect Redirect `xml:"Redirect"`
+}
+
+// RedirectAllRequestsTo defines redirect in BucketWebsiteConfiguration
+type RedirectAllRequestsTo struct {
+ XMLName xml.Name `xml:"RedirectAllRequestsTo"`
+ Protocol ProtocolType `xml:"Protocol,omitempty"`
+ HostName string `xml:"HostName"`
+}
+
+// BucketWebsiteConfiguration defines the bucket website configuration
+type BucketWebsiteConfiguration struct {
+ XMLName xml.Name `xml:"WebsiteConfiguration"`
+ RedirectAllRequestsTo RedirectAllRequestsTo `xml:"RedirectAllRequestsTo,omitempty"`
+ IndexDocument IndexDocument `xml:"IndexDocument,omitempty"`
+ ErrorDocument ErrorDocument `xml:"ErrorDocument,omitempty"`
+ RoutingRules []RoutingRule `xml:"RoutingRules>RoutingRule,omitempty"`
+}
+
+// SetBucketWebsiteConfigurationInput is the input parameter of SetBucketWebsiteConfiguration function
+type SetBucketWebsiteConfigurationInput struct {
+ Bucket string `xml:"-"`
+ BucketWebsiteConfiguration
+}
+
+// GetBucketWebsiteConfigurationOutput is the result of GetBucketWebsiteConfiguration function
+type GetBucketWebsiteConfigurationOutput struct {
+ BaseModel
+ BucketWebsiteConfiguration
+}
+
+// GetBucketMetadataInput is the input parameter of GetBucketMetadata function
+type GetBucketMetadataInput struct {
+ Bucket string
+ Origin string
+ RequestHeader string
+}
+
+// SetObjectMetadataInput is the input parameter of SetObjectMetadata function
+type SetObjectMetadataInput struct {
+ Bucket string
+ Key string
+ VersionId string
+ MetadataDirective MetadataDirectiveType
+ CacheControl string
+ ContentDisposition string
+ ContentEncoding string
+ ContentLanguage string
+ ContentType string
+ Expires string
+ WebsiteRedirectLocation string
+ StorageClass StorageClassType
+ Metadata map[string]string
+}
+
+// SetObjectMetadataOutput is the result of SetObjectMetadata function
+type SetObjectMetadataOutput struct {
+ BaseModel
+ MetadataDirective MetadataDirectiveType
+ CacheControl string
+ ContentDisposition string
+ ContentEncoding string
+ ContentLanguage string
+ ContentType string
+ Expires string
+ WebsiteRedirectLocation string
+ StorageClass StorageClassType
+ Metadata map[string]string
+}
+
+// GetBucketMetadataOutput is the result of GetBucketMetadata function
+type GetBucketMetadataOutput struct {
+ BaseModel
+ StorageClass StorageClassType
+ Location string
+ Version string
+ AllowOrigin string
+ AllowMethod string
+ AllowHeader string
+ MaxAgeSeconds int
+ ExposeHeader string
+ Epid string
+}
+
+// BucketLoggingStatus defines the bucket logging configuration
+type BucketLoggingStatus struct {
+ XMLName xml.Name `xml:"BucketLoggingStatus"`
+ Agency string `xml:"Agency,omitempty"`
+ TargetBucket string `xml:"LoggingEnabled>TargetBucket,omitempty"`
+ TargetPrefix string `xml:"LoggingEnabled>TargetPrefix,omitempty"`
+ TargetGrants []Grant `xml:"LoggingEnabled>TargetGrants>Grant,omitempty"`
+}
+
+// SetBucketLoggingConfigurationInput is the input parameter of SetBucketLoggingConfiguration function
+type SetBucketLoggingConfigurationInput struct {
+ Bucket string `xml:"-"`
+ BucketLoggingStatus
+}
+
+// GetBucketLoggingConfigurationOutput is the result of GetBucketLoggingConfiguration function
+type GetBucketLoggingConfigurationOutput struct {
+ BaseModel
+ BucketLoggingStatus
+}
+
+// Transition defines transition property in LifecycleRule
+type Transition struct {
+ XMLName xml.Name `xml:"Transition"`
+ Date time.Time `xml:"Date,omitempty"`
+ Days int `xml:"Days,omitempty"`
+ StorageClass StorageClassType `xml:"StorageClass"`
+}
+
+// Expiration defines expiration property in LifecycleRule
+type Expiration struct {
+ XMLName xml.Name `xml:"Expiration"`
+ Date time.Time `xml:"Date,omitempty"`
+ Days int `xml:"Days,omitempty"`
+}
+
+// NoncurrentVersionTransition defines noncurrentVersion transition property in LifecycleRule
+type NoncurrentVersionTransition struct {
+ XMLName xml.Name `xml:"NoncurrentVersionTransition"`
+ NoncurrentDays int `xml:"NoncurrentDays"`
+ StorageClass StorageClassType `xml:"StorageClass"`
+}
+
+// NoncurrentVersionExpiration defines noncurrentVersion expiration property in LifecycleRule
+type NoncurrentVersionExpiration struct {
+ XMLName xml.Name `xml:"NoncurrentVersionExpiration"`
+ NoncurrentDays int `xml:"NoncurrentDays"`
+}
+
+// LifecycleRule defines lifecycle rule
+type LifecycleRule struct {
+ ID string `xml:"ID,omitempty"`
+ Prefix string `xml:"Prefix"`
+ Status RuleStatusType `xml:"Status"`
+ Transitions []Transition `xml:"Transition,omitempty"`
+ Expiration Expiration `xml:"Expiration,omitempty"`
+ NoncurrentVersionTransitions []NoncurrentVersionTransition `xml:"NoncurrentVersionTransition,omitempty"`
+ NoncurrentVersionExpiration NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty"`
+}
+
+// BucketLifecyleConfiguration defines the bucket lifecycle configuration
+type BucketLifecyleConfiguration struct {
+ XMLName xml.Name `xml:"LifecycleConfiguration"`
+ LifecycleRules []LifecycleRule `xml:"Rule"`
+}
+
+// SetBucketLifecycleConfigurationInput is the input parameter of SetBucketLifecycleConfiguration function
+type SetBucketLifecycleConfigurationInput struct {
+ Bucket string `xml:"-"`
+ BucketLifecyleConfiguration
+}
+
+// GetBucketLifecycleConfigurationOutput is the result of GetBucketLifecycleConfiguration function
+type GetBucketLifecycleConfigurationOutput struct {
+ BaseModel
+ BucketLifecyleConfiguration
+}
+
+// Tag defines tag property in BucketTagging
+type Tag struct {
+ XMLName xml.Name `xml:"Tag"`
+ Key string `xml:"Key"`
+ Value string `xml:"Value"`
+}
+
+// BucketTagging defines the bucket tag configuration
+type BucketTagging struct {
+ XMLName xml.Name `xml:"Tagging"`
+ Tags []Tag `xml:"TagSet>Tag"`
+}
+
+// SetBucketTaggingInput is the input parameter of SetBucketTagging function
+type SetBucketTaggingInput struct {
+ Bucket string `xml:"-"`
+ BucketTagging
+}
+
+// GetBucketTaggingOutput is the result of GetBucketTagging function
+type GetBucketTaggingOutput struct {
+ BaseModel
+ BucketTagging
+}
+
+// FilterRule defines filter rule in TopicConfiguration
+type FilterRule struct {
+ XMLName xml.Name `xml:"FilterRule"`
+ Name string `xml:"Name,omitempty"`
+ Value string `xml:"Value,omitempty"`
+}
+
+// TopicConfiguration defines the topic configuration
+type TopicConfiguration struct {
+ XMLName xml.Name `xml:"TopicConfiguration"`
+ ID string `xml:"Id,omitempty"`
+ Topic string `xml:"Topic"`
+ Events []EventType `xml:"Event"`
+ FilterRules []FilterRule `xml:"Filter>Object>FilterRule"`
+}
+
+// BucketNotification defines the bucket notification configuration
+type BucketNotification struct {
+ XMLName xml.Name `xml:"NotificationConfiguration"`
+ TopicConfigurations []TopicConfiguration `xml:"TopicConfiguration"`
+}
+
+// SetBucketNotificationInput is the input parameter of SetBucketNotification function
+type SetBucketNotificationInput struct {
+ Bucket string `xml:"-"`
+ BucketNotification
+}
+
+type topicConfigurationS3 struct {
+ XMLName xml.Name `xml:"TopicConfiguration"`
+ ID string `xml:"Id,omitempty"`
+ Topic string `xml:"Topic"`
+ Events []string `xml:"Event"`
+ FilterRules []FilterRule `xml:"Filter>S3Key>FilterRule"`
+}
+
+type bucketNotificationS3 struct {
+ XMLName xml.Name `xml:"NotificationConfiguration"`
+ TopicConfigurations []topicConfigurationS3 `xml:"TopicConfiguration"`
+}
+
+type getBucketNotificationOutputS3 struct {
+ BaseModel
+ bucketNotificationS3
+}
+
+// GetBucketNotificationOutput is the result of GetBucketNotification function
+type GetBucketNotificationOutput struct {
+ BaseModel
+ BucketNotification
+}
+
+// DeleteObjectInput is the input parameter of DeleteObject function
+type DeleteObjectInput struct {
+ Bucket string
+ Key string
+ VersionId string
+}
+
+// DeleteObjectOutput is the result of DeleteObject function
+type DeleteObjectOutput struct {
+ BaseModel
+ VersionId string
+ DeleteMarker bool
+}
+
+// ObjectToDelete defines the object property in DeleteObjectsInput
+type ObjectToDelete struct {
+ XMLName xml.Name `xml:"Object"`
+ Key string `xml:"Key"`
+ VersionId string `xml:"VersionId,omitempty"`
+}
+
+// DeleteObjectsInput is the input parameter of DeleteObjects function
+type DeleteObjectsInput struct {
+ Bucket string `xml:"-"`
+ XMLName xml.Name `xml:"Delete"`
+ Quiet bool `xml:"Quiet,omitempty"`
+ Objects []ObjectToDelete `xml:"Object"`
+}
+
+// Deleted defines the deleted property in DeleteObjectsOutput
+type Deleted struct {
+ XMLName xml.Name `xml:"Deleted"`
+ Key string `xml:"Key"`
+ VersionId string `xml:"VersionId"`
+ DeleteMarker bool `xml:"DeleteMarker"`
+ DeleteMarkerVersionId string `xml:"DeleteMarkerVersionId"`
+}
+
+// Error defines the error property in DeleteObjectsOutput
+type Error struct {
+ XMLName xml.Name `xml:"Error"`
+ Key string `xml:"Key"`
+ VersionId string `xml:"VersionId"`
+ Code string `xml:"Code"`
+ Message string `xml:"Message"`
+}
+
+// DeleteObjectsOutput is the result of DeleteObjects function
+type DeleteObjectsOutput struct {
+ BaseModel
+ XMLName xml.Name `xml:"DeleteResult"`
+ Deleteds []Deleted `xml:"Deleted"`
+ Errors []Error `xml:"Error"`
+}
+
+// SetObjectAclInput is the input parameter of SetObjectAcl function
+type SetObjectAclInput struct {
+ Bucket string `xml:"-"`
+ Key string `xml:"-"`
+ VersionId string `xml:"-"`
+ ACL AclType `xml:"-"`
+ AccessControlPolicy
+}
+
+// GetObjectAclInput is the input parameter of GetObjectAcl function
+type GetObjectAclInput struct {
+ Bucket string
+ Key string
+ VersionId string
+}
+
+// GetObjectAclOutput is the result of GetObjectAcl function
+type GetObjectAclOutput struct {
+ BaseModel
+ VersionId string
+ AccessControlPolicy
+}
+
+// RestoreObjectInput is the input parameter of RestoreObject function
+type RestoreObjectInput struct {
+ Bucket string `xml:"-"`
+ Key string `xml:"-"`
+ VersionId string `xml:"-"`
+ XMLName xml.Name `xml:"RestoreRequest"`
+ Days int `xml:"Days"`
+ Tier RestoreTierType `xml:"GlacierJobParameters>Tier,omitempty"`
+}
+
+// ISseHeader defines the sse encryption header
+type ISseHeader interface {
+ GetEncryption() string
+ GetKey() string
+}
+
+// SseKmsHeader defines the SseKms header
+type SseKmsHeader struct {
+ Encryption string
+ Key string
+ isObs bool
+}
+
+// SseCHeader defines the SseC header
+type SseCHeader struct {
+ Encryption string
+ Key string
+ KeyMD5 string
+}
+
+// GetObjectMetadataInput is the input parameter of GetObjectMetadata function
+type GetObjectMetadataInput struct {
+ Bucket string
+ Key string
+ VersionId string
+ Origin string
+ RequestHeader string
+ SseHeader ISseHeader
+}
+
+// GetObjectMetadataOutput is the result of GetObjectMetadata function
+type GetObjectMetadataOutput struct {
+ BaseModel
+ VersionId string
+ WebsiteRedirectLocation string
+ Expiration string
+ Restore string
+ ObjectType string
+ NextAppendPosition string
+ StorageClass StorageClassType
+ ContentLength int64
+ ContentType string
+ ETag string
+ AllowOrigin string
+ AllowHeader string
+ AllowMethod string
+ ExposeHeader string
+ MaxAgeSeconds int
+ LastModified time.Time
+ SseHeader ISseHeader
+ Metadata map[string]string
+}
+
+// GetObjectInput is the input parameter of GetObject function
+type GetObjectInput struct {
+ GetObjectMetadataInput
+ IfMatch string
+ IfNoneMatch string
+ IfUnmodifiedSince time.Time
+ IfModifiedSince time.Time
+ RangeStart int64
+ RangeEnd int64
+ ImageProcess string
+ ResponseCacheControl string
+ ResponseContentDisposition string
+ ResponseContentEncoding string
+ ResponseContentLanguage string
+ ResponseContentType string
+ ResponseExpires string
+}
+
+// GetObjectOutput is the result of GetObject function
+type GetObjectOutput struct {
+ GetObjectMetadataOutput
+ DeleteMarker bool
+ CacheControl string
+ ContentDisposition string
+ ContentEncoding string
+ ContentLanguage string
+ Expires string
+ Body io.ReadCloser
+}
+
+// ObjectOperationInput defines the object operation properties
+type ObjectOperationInput struct {
+ Bucket string
+ Key string
+ ACL AclType
+ GrantReadId string
+ GrantReadAcpId string
+ GrantWriteAcpId string
+ GrantFullControlId string
+ StorageClass StorageClassType
+ WebsiteRedirectLocation string
+ Expires int64
+ SseHeader ISseHeader
+ Metadata map[string]string
+}
+
+// PutObjectBasicInput defines the basic object operation properties
+type PutObjectBasicInput struct {
+ ObjectOperationInput
+ ContentType string
+ ContentMD5 string
+ ContentLength int64
+}
+
+// PutObjectInput is the input parameter of PutObject function
+type PutObjectInput struct {
+ PutObjectBasicInput
+ Body io.Reader
+}
+
+// PutFileInput is the input parameter of PutFile function
+type PutFileInput struct {
+ PutObjectBasicInput
+ SourceFile string
+}
+
+// PutObjectOutput is the result of PutObject function
+type PutObjectOutput struct {
+ BaseModel
+ VersionId string
+ SseHeader ISseHeader
+ StorageClass StorageClassType
+ ETag string
+}
+
+// CopyObjectInput is the input parameter of CopyObject function
+type CopyObjectInput struct {
+ ObjectOperationInput
+ CopySourceBucket string
+ CopySourceKey string
+ CopySourceVersionId string
+ CopySourceIfMatch string
+ CopySourceIfNoneMatch string
+ CopySourceIfUnmodifiedSince time.Time
+ CopySourceIfModifiedSince time.Time
+ SourceSseHeader ISseHeader
+ CacheControl string
+ ContentDisposition string
+ ContentEncoding string
+ ContentLanguage string
+ ContentType string
+ Expires string
+ MetadataDirective MetadataDirectiveType
+ SuccessActionRedirect string
+}
+
+// CopyObjectOutput is the result of CopyObject function
+type CopyObjectOutput struct {
+ BaseModel
+ CopySourceVersionId string `xml:"-"`
+ VersionId string `xml:"-"`
+ SseHeader ISseHeader `xml:"-"`
+ XMLName xml.Name `xml:"CopyObjectResult"`
+ LastModified time.Time `xml:"LastModified"`
+ ETag string `xml:"ETag"`
+}
+
+// AbortMultipartUploadInput is the input parameter of AbortMultipartUpload function
+type AbortMultipartUploadInput struct {
+ Bucket string
+ Key string
+ UploadId string
+}
+
+// InitiateMultipartUploadInput is the input parameter of InitiateMultipartUpload function
+type InitiateMultipartUploadInput struct {
+ ObjectOperationInput
+ ContentType string
+}
+
+// InitiateMultipartUploadOutput is the result of InitiateMultipartUpload function
+type InitiateMultipartUploadOutput struct {
+ BaseModel
+ XMLName xml.Name `xml:"InitiateMultipartUploadResult"`
+ Bucket string `xml:"Bucket"`
+ Key string `xml:"Key"`
+ UploadId string `xml:"UploadId"`
+ SseHeader ISseHeader
+}
+
+// UploadPartInput is the input parameter of UploadPart function
+type UploadPartInput struct {
+ Bucket string
+ Key string
+ PartNumber int
+ UploadId string
+ ContentMD5 string
+ SseHeader ISseHeader
+ Body io.Reader
+ SourceFile string
+ Offset int64
+ PartSize int64
+}
+
+// UploadPartOutput is the result of UploadPart function
+type UploadPartOutput struct {
+ BaseModel
+ PartNumber int
+ ETag string
+ SseHeader ISseHeader
+}
+
+// Part defines the part properties
+type Part struct {
+ XMLName xml.Name `xml:"Part"`
+ PartNumber int `xml:"PartNumber"`
+ ETag string `xml:"ETag"`
+ LastModified time.Time `xml:"LastModified,omitempty"`
+ Size int64 `xml:"Size,omitempty"`
+}
+
+// CompleteMultipartUploadInput is the input parameter of CompleteMultipartUpload function
+type CompleteMultipartUploadInput struct {
+ Bucket string `xml:"-"`
+ Key string `xml:"-"`
+ UploadId string `xml:"-"`
+ XMLName xml.Name `xml:"CompleteMultipartUpload"`
+ Parts []Part `xml:"Part"`
+}
+
+// CompleteMultipartUploadOutput is the result of CompleteMultipartUpload function
+type CompleteMultipartUploadOutput struct {
+ BaseModel
+ VersionId string `xml:"-"`
+ SseHeader ISseHeader `xml:"-"`
+ XMLName xml.Name `xml:"CompleteMultipartUploadResult"`
+ Location string `xml:"Location"`
+ Bucket string `xml:"Bucket"`
+ Key string `xml:"Key"`
+ ETag string `xml:"ETag"`
+}
+
+// ListPartsInput is the input parameter of ListParts function
+type ListPartsInput struct {
+ Bucket string
+ Key string
+ UploadId string
+ MaxParts int
+ PartNumberMarker int
+}
+
+// ListPartsOutput is the result of ListParts function
+type ListPartsOutput struct {
+ BaseModel
+ XMLName xml.Name `xml:"ListPartsResult"`
+ Bucket string `xml:"Bucket"`
+ Key string `xml:"Key"`
+ UploadId string `xml:"UploadId"`
+ PartNumberMarker int `xml:"PartNumberMarker"`
+ NextPartNumberMarker int `xml:"NextPartNumberMarker"`
+ MaxParts int `xml:"MaxParts"`
+ IsTruncated bool `xml:"IsTruncated"`
+ StorageClass StorageClassType `xml:"StorageClass"`
+ Initiator Initiator `xml:"Initiator"`
+ Owner Owner `xml:"Owner"`
+ Parts []Part `xml:"Part"`
+}
+
+// CopyPartInput is the input parameter of CopyPart function
+type CopyPartInput struct {
+ Bucket string
+ Key string
+ UploadId string
+ PartNumber int
+ CopySourceBucket string
+ CopySourceKey string
+ CopySourceVersionId string
+ CopySourceRangeStart int64
+ CopySourceRangeEnd int64
+ SseHeader ISseHeader
+ SourceSseHeader ISseHeader
+}
+
+// CopyPartOutput is the result of CopyPart function
+type CopyPartOutput struct {
+ BaseModel
+ XMLName xml.Name `xml:"CopyPartResult"`
+ PartNumber int `xml:"-"`
+ ETag string `xml:"ETag"`
+ LastModified time.Time `xml:"LastModified"`
+ SseHeader ISseHeader `xml:"-"`
+}
+
+// CreateSignedUrlInput is the input parameter of CreateSignedUrl function
+type CreateSignedUrlInput struct {
+ Method HttpMethodType
+ Bucket string
+ Key string
+ SubResource SubResourceType
+ Expires int
+ Headers map[string]string
+ QueryParams map[string]string
+}
+
+// CreateSignedUrlOutput is the result of CreateSignedUrl function
+type CreateSignedUrlOutput struct {
+ SignedUrl string
+ ActualSignedRequestHeaders http.Header
+}
+
+// CreateBrowserBasedSignatureInput is the input parameter of CreateBrowserBasedSignature function.
+type CreateBrowserBasedSignatureInput struct {
+ Bucket string
+ Key string
+ Expires int
+ FormParams map[string]string
+}
+
+// CreateBrowserBasedSignatureOutput is the result of CreateBrowserBasedSignature function.
+type CreateBrowserBasedSignatureOutput struct {
+ OriginPolicy string
+ Policy string
+ Algorithm string
+ Credential string
+ Date string
+ Signature string
+}
+
+// HeadObjectInput is the input parameter of HeadObject function
+type HeadObjectInput struct {
+ Bucket string
+ Key string
+ VersionId string
+}
+
+// BucketPayer defines the request payment configuration
+type BucketPayer struct {
+ XMLName xml.Name `xml:"RequestPaymentConfiguration"`
+ Payer PayerType `xml:"Payer"`
+}
+
+// SetBucketRequestPaymentInput is the input parameter of SetBucketRequestPayment function
+type SetBucketRequestPaymentInput struct {
+ Bucket string `xml:"-"`
+ BucketPayer
+}
+
+// GetBucketRequestPaymentOutput is the result of GetBucketRequestPayment function
+type GetBucketRequestPaymentOutput struct {
+ BaseModel
+ BucketPayer
+}
+
+// UploadFileInput is the input parameter of UploadFile function
+type UploadFileInput struct {
+ ObjectOperationInput
+ ContentType string
+ UploadFile string
+ PartSize int64
+ TaskNum int
+ EnableCheckpoint bool
+ CheckpointFile string
+}
+
+// DownloadFileInput is the input parameter of DownloadFile function
+type DownloadFileInput struct {
+ GetObjectMetadataInput
+ IfMatch string
+ IfNoneMatch string
+ IfModifiedSince time.Time
+ IfUnmodifiedSince time.Time
+ DownloadFile string
+ PartSize int64
+ TaskNum int
+ EnableCheckpoint bool
+ CheckpointFile string
+}
+
+// SetBucketFetchPolicyInput is the input parameter of SetBucketFetchPolicy function
+type SetBucketFetchPolicyInput struct {
+ Bucket string
+ Status FetchPolicyStatusType `json:"status"`
+ Agency string `json:"agency"`
+}
+
+// GetBucketFetchPolicyInput is the input parameter of GetBucketFetchPolicy function
+type GetBucketFetchPolicyInput struct {
+ Bucket string
+}
+
+// GetBucketFetchPolicyOutput is the result of GetBucketFetchPolicy function
+type GetBucketFetchPolicyOutput struct {
+ BaseModel
+ FetchResponse `json:"fetch"`
+}
+
+// FetchResponse defines the response fetch policy configuration
+type FetchResponse struct {
+ Status FetchPolicyStatusType `json:"status"`
+ Agency string `json:"agency"`
+}
+
+// DeleteBucketFetchPolicyInput is the input parameter of DeleteBucketFetchPolicy function
+type DeleteBucketFetchPolicyInput struct {
+ Bucket string
+}
+
+// SetBucketFetchJobInput is the input parameter of SetBucketFetchJob function
+type SetBucketFetchJobInput struct {
+ Bucket string `json:"bucket"`
+ URL string `json:"url"`
+ Host string `json:"host,omitempty"`
+ Key string `json:"key,omitempty"`
+ Md5 string `json:"md5,omitempty"`
+ CallBackURL string `json:"callbackurl,omitempty"`
+ CallBackBody string `json:"callbackbody,omitempty"`
+ CallBackBodyType string `json:"callbackbodytype,omitempty"`
+ CallBackHost string `json:"callbackhost,omitempty"`
+ FileType string `json:"file_type,omitempty"`
+ IgnoreSameKey bool `json:"ignore_same_key,omitempty"`
+ ObjectHeaders map[string]string `json:"objectheaders,omitempty"`
+ Etag string `json:"etag,omitempty"`
+ TrustName string `json:"trustname,omitempty"`
+}
+
+// SetBucketFetchJobOutput is the result of SetBucketFetchJob function
+type SetBucketFetchJobOutput struct {
+ BaseModel
+ SetBucketFetchJobResponse
+}
+
+// SetBucketFetchJobResponse defines the response SetBucketFetchJob configuration
+type SetBucketFetchJobResponse struct {
+ ID string `json:"id"`
+ Wait int `json:"Wait"`
+}
+
+// GetBucketFetchJobInput is the input parameter of GetBucketFetchJob function
+type GetBucketFetchJobInput struct {
+ Bucket string
+ JobID string
+}
+
+// GetBucketFetchJobOutput is the result of GetBucketFetchJob function
+type GetBucketFetchJobOutput struct {
+ BaseModel
+ GetBucketFetchJobResponse
+}
+
+// GetBucketFetchJobResponse defines the response fetch job configuration
+type GetBucketFetchJobResponse struct {
+ Err string `json:"err"`
+ Code string `json:"code"`
+ Status string `json:"status"`
+ Job JobResponse `json:"job"`
+}
+
+// JobResponse defines the response job configuration
+type JobResponse struct {
+ Bucket string `json:"bucket"`
+ URL string `json:"url"`
+ Host string `json:"host"`
+ Key string `json:"key"`
+ Md5 string `json:"md5"`
+ CallBackURL string `json:"callbackurl"`
+ CallBackBody string `json:"callbackbody"`
+ CallBackBodyType string `json:"callbackbodytype"`
+ CallBackHost string `json:"callbackhost"`
+ FileType string `json:"file_type"`
+ IgnoreSameKey bool `json:"ignore_same_key"`
+}
diff --git a/modules/obs/pool.go b/modules/obs/pool.go
new file mode 100755
index 000000000..4596f0a16
--- /dev/null
+++ b/modules/obs/pool.go
@@ -0,0 +1,543 @@
+// Copyright 2019 Huawei Technologies Co.,Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+//nolint:structcheck, unused
+//nolint:golint, unused
+package obs
+
+import (
+ "errors"
+ "fmt"
+ "runtime"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// Future defines interface with function: Get
+type Future interface {
+ Get() interface{}
+}
+
+// FutureResult for task result
+type FutureResult struct {
+ result interface{}
+ resultChan chan interface{}
+ lock sync.Mutex
+}
+
+type panicResult struct {
+ presult interface{}
+}
+
+func (f *FutureResult) checkPanic() interface{} {
+ if r, ok := f.result.(panicResult); ok {
+ panic(r.presult)
+ }
+ return f.result
+}
+
+// Get blocks until the task result is available and returns it. If the task
+// panicked, Get re-panics with that value.
+func (f *FutureResult) Get() interface{} {
+ if f.resultChan == nil {
+ return f.checkPanic()
+ }
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ if f.resultChan == nil {
+ return f.checkPanic()
+ }
+
+ f.result = <-f.resultChan
+ close(f.resultChan)
+ f.resultChan = nil
+ return f.checkPanic()
+}
+
+// Task defines interface with function: Run
+type Task interface {
+ Run() interface{}
+}
+
+type funcWrapper struct {
+ f func() interface{}
+}
+
+func (fw *funcWrapper) Run() interface{} {
+ if fw.f != nil {
+ return fw.f()
+ }
+ return nil
+}
+
+type taskWrapper struct {
+ t Task
+ f *FutureResult
+}
+
+func (tw *taskWrapper) Run() interface{} {
+ if tw.t != nil {
+ return tw.t.Run()
+ }
+ return nil
+}
+
+type signalTask struct {
+ id string
+}
+
+func (signalTask) Run() interface{} {
+ return nil
+}
+
+type worker struct {
+ name string
+ taskQueue chan Task
+ wg *sync.WaitGroup
+ pool *RoutinePool
+}
+
+func runTask(t Task) {
+ if tw, ok := t.(*taskWrapper); ok {
+ defer func() {
+ if r := recover(); r != nil {
+ tw.f.resultChan <- panicResult{
+ presult: r,
+ }
+ }
+ }()
+ ret := t.Run()
+ tw.f.resultChan <- ret
+ } else {
+ t.Run()
+ }
+}
+
+func (*worker) runTask(t Task) {
+ runTask(t)
+}
+
+func (w *worker) start() {
+ go func() {
+ defer func() {
+ if w.wg != nil {
+ w.wg.Done()
+ }
+ }()
+ for {
+ task, ok := <-w.taskQueue
+ if !ok {
+ break
+ }
+ w.pool.AddCurrentWorkingCnt(1)
+ w.runTask(task)
+ w.pool.AddCurrentWorkingCnt(-1)
+ if w.pool.autoTuneWorker(w) {
+ break
+ }
+ }
+ }()
+}
+
+func (w *worker) release() {
+ w.taskQueue = nil
+ w.wg = nil
+ w.pool = nil
+}
+
+// Pool defines coroutine pool interface
+type Pool interface {
+ ShutDown()
+ Submit(t Task) (Future, error)
+ SubmitFunc(f func() interface{}) (Future, error)
+ Execute(t Task)
+ ExecuteFunc(f func() interface{})
+ GetMaxWorkerCnt() int64
+ AddMaxWorkerCnt(value int64) int64
+ GetCurrentWorkingCnt() int64
+ AddCurrentWorkingCnt(value int64) int64
+ GetWorkerCnt() int64
+ AddWorkerCnt(value int64) int64
+ EnableAutoTune()
+}
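+
+// A minimal usage sketch (illustrative only; error handling elided):
+//
+//    pool := NewRoutinePool(4, 16)
+//    future, _ := pool.SubmitFunc(func() interface{} {
+//        return 1 + 1 // any returned value becomes the Future result
+//    })
+//    sum := future.Get() // blocks until the task has run
+//    pool.ShutDown()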
+
+type basicPool struct {
+ maxWorkerCnt int64
+ workerCnt int64
+ currentWorkingCnt int64
+ isShutDown int32
+}
+
+// ErrTaskInvalid will be returned if the task is nil
+var ErrTaskInvalid = errors.New("Task is nil")
+
+func (pool *basicPool) GetCurrentWorkingCnt() int64 {
+ return atomic.LoadInt64(&pool.currentWorkingCnt)
+}
+
+func (pool *basicPool) AddCurrentWorkingCnt(value int64) int64 {
+ return atomic.AddInt64(&pool.currentWorkingCnt, value)
+}
+
+func (pool *basicPool) GetWorkerCnt() int64 {
+ return atomic.LoadInt64(&pool.workerCnt)
+}
+
+func (pool *basicPool) AddWorkerCnt(value int64) int64 {
+ return atomic.AddInt64(&pool.workerCnt, value)
+}
+
+func (pool *basicPool) GetMaxWorkerCnt() int64 {
+ return atomic.LoadInt64(&pool.maxWorkerCnt)
+}
+
+func (pool *basicPool) AddMaxWorkerCnt(value int64) int64 {
+ return atomic.AddInt64(&pool.maxWorkerCnt, value)
+}
+
+func (pool *basicPool) CompareAndSwapCurrentWorkingCnt(oldValue, newValue int64) bool {
+ return atomic.CompareAndSwapInt64(&pool.currentWorkingCnt, oldValue, newValue)
+}
+
+// EnableAutoTune is a no-op on the basic pool; RoutinePool overrides it to
+// switch worker auto-tuning on.
+func (pool *basicPool) EnableAutoTune() {
+}
+
+// RoutinePool defines the coroutine pool struct
+type RoutinePool struct {
+ basicPool
+ taskQueue chan Task
+ dispatchQueue chan Task
+ workers map[string]*worker
+ cacheCnt int
+ wg *sync.WaitGroup
+ lock *sync.Mutex
+ shutDownWg *sync.WaitGroup
+ autoTune int32
+}
+
+// ErrSubmitTimeout will be returned if submit task timeout when calling SubmitWithTimeout function
+var ErrSubmitTimeout = errors.New("Submit task timeout")
+
+// ErrPoolShutDown will be returned if RoutinePool is shutdown
+var ErrPoolShutDown = errors.New("RoutinePool is shutdown")
+
+// ErrTaskReject will be returned if submit task is rejected
+var ErrTaskReject = errors.New("Submit task is rejected")
+
+var closeQueue = signalTask{id: "closeQueue"}
+
+// NewRoutinePool creates a RoutinePool instance
+func NewRoutinePool(maxWorkerCnt, cacheCnt int) Pool {
+ if maxWorkerCnt <= 0 {
+ maxWorkerCnt = runtime.NumCPU()
+ }
+
+ pool := &RoutinePool{
+ cacheCnt: cacheCnt,
+ wg: new(sync.WaitGroup),
+ lock: new(sync.Mutex),
+ shutDownWg: new(sync.WaitGroup),
+ autoTune: 0,
+ }
+ pool.isShutDown = 0
+ pool.maxWorkerCnt += int64(maxWorkerCnt)
+ if pool.cacheCnt <= 0 {
+ pool.taskQueue = make(chan Task)
+ } else {
+ pool.taskQueue = make(chan Task, pool.cacheCnt)
+ }
+ pool.workers = make(map[string]*worker, pool.maxWorkerCnt)
+ // dispatchQueue must be unbuffered so that Execute and Submit block until the dispatcher picks the task up
+ pool.dispatchQueue = make(chan Task)
+ pool.dispatcher()
+
+ return pool
+}
+
+// EnableAutoTune sets the autoTune enabled
+func (pool *RoutinePool) EnableAutoTune() {
+ atomic.StoreInt32(&pool.autoTune, 1)
+}
+
+func (pool *RoutinePool) checkStatus(t Task) error {
+ if t == nil {
+ return ErrTaskInvalid
+ }
+
+ if atomic.LoadInt32(&pool.isShutDown) == 1 {
+ return ErrPoolShutDown
+ }
+ return nil
+}
+
+func (pool *RoutinePool) dispatcher() {
+ pool.shutDownWg.Add(1)
+ go func() {
+ for {
+ task, ok := <-pool.dispatchQueue
+ if !ok {
+ break
+ }
+
+ if task == closeQueue {
+ close(pool.taskQueue)
+ pool.shutDownWg.Done()
+ continue
+ }
+
+ if pool.GetWorkerCnt() < pool.GetMaxWorkerCnt() {
+ pool.addWorker()
+ }
+
+ pool.taskQueue <- task
+ }
+ }()
+}
+
+// AddMaxWorkerCnt sets the maxWorkerCnt field's value and returns it
+func (pool *RoutinePool) AddMaxWorkerCnt(value int64) int64 {
+ if atomic.LoadInt32(&pool.autoTune) == 1 {
+ return pool.basicPool.AddMaxWorkerCnt(value)
+ }
+ return pool.GetMaxWorkerCnt()
+}
+
+func (pool *RoutinePool) addWorker() {
+ if atomic.LoadInt32(&pool.autoTune) == 1 {
+ pool.lock.Lock()
+ defer pool.lock.Unlock()
+ }
+ w := &worker{}
+ w.name = fmt.Sprintf("woker-%d", len(pool.workers))
+ w.taskQueue = pool.taskQueue
+ w.wg = pool.wg
+ pool.AddWorkerCnt(1)
+ w.pool = pool
+ pool.workers[w.name] = w
+ pool.wg.Add(1)
+ w.start()
+}
+
+func (pool *RoutinePool) autoTuneWorker(w *worker) bool {
+ if atomic.LoadInt32(&pool.autoTune) == 0 {
+ return false
+ }
+
+ if w == nil {
+ return false
+ }
+
+ workerCnt := pool.GetWorkerCnt()
+ maxWorkerCnt := pool.GetMaxWorkerCnt()
+ if workerCnt > maxWorkerCnt && atomic.CompareAndSwapInt64(&pool.workerCnt, workerCnt, workerCnt-1) {
+ pool.lock.Lock()
+ defer pool.lock.Unlock()
+ delete(pool.workers, w.name)
+ w.wg.Done()
+ w.release()
+ return true
+ }
+
+ return false
+}
+
+// ExecuteFunc creates a funcWrapper instance with the specified function and calls the Execute function
+func (pool *RoutinePool) ExecuteFunc(f func() interface{}) {
+ fw := &funcWrapper{
+ f: f,
+ }
+ pool.Execute(fw)
+}
+
+// Execute pushes the specified task to the dispatchQueue
+func (pool *RoutinePool) Execute(t Task) {
+ if t != nil {
+ pool.dispatchQueue <- t
+ }
+}
+
+// SubmitFunc creates a funcWrapper instance with the specified function and calls the Submit function
+func (pool *RoutinePool) SubmitFunc(f func() interface{}) (Future, error) {
+ fw := &funcWrapper{
+ f: f,
+ }
+ return pool.Submit(fw)
+}
+
+// Submit pushes the specified task to the dispatchQueue, and returns the FutureResult and error info
+func (pool *RoutinePool) Submit(t Task) (Future, error) {
+ if err := pool.checkStatus(t); err != nil {
+ return nil, err
+ }
+ f := &FutureResult{}
+ f.resultChan = make(chan interface{}, 1)
+ tw := &taskWrapper{
+ t: t,
+ f: f,
+ }
+ pool.dispatchQueue <- tw
+ return f, nil
+}
+
+// SubmitWithTimeout pushes the specified task to the dispatchQueue, and returns the FutureResult and error info.
+// It also takes a timeout in milliseconds and returns ErrSubmitTimeout if the task cannot be enqueued within that time.
+func (pool *RoutinePool) SubmitWithTimeout(t Task, timeout int64) (Future, error) {
+ if timeout <= 0 {
+ return pool.Submit(t)
+ }
+ if err := pool.checkStatus(t); err != nil {
+ return nil, err
+ }
+ timeoutChan := make(chan bool, 1)
+ go func() {
+  time.Sleep(time.Millisecond * time.Duration(timeout))
+ timeoutChan <- true
+ close(timeoutChan)
+ }()
+
+ f := &FutureResult{}
+ f.resultChan = make(chan interface{}, 1)
+ tw := &taskWrapper{
+ t: t,
+ f: f,
+ }
+ select {
+ case pool.dispatchQueue <- tw:
+ return f, nil
+ case <-timeoutChan:
+  return nil, ErrSubmitTimeout
+ }
+}
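+
+// For example (illustrative; task is any Task implementation), a submission
+// that cannot reach the dispatcher within 100 milliseconds is rejected:
+//
+//    future, err := pool.SubmitWithTimeout(task, 100)
+//    if err == ErrSubmitTimeout {
+//        // the dispatch queue stayed busy for the whole window
+//    }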
+
+func (pool *RoutinePool) beforeCloseDispatchQueue() {
+ if !atomic.CompareAndSwapInt32(&pool.isShutDown, 0, 1) {
+ return
+ }
+ pool.dispatchQueue <- closeQueue
+ pool.wg.Wait()
+}
+
+func (pool *RoutinePool) doCloseDispatchQueue() {
+ close(pool.dispatchQueue)
+ pool.shutDownWg.Wait()
+}
+
+// ShutDown closes the RoutinePool instance
+func (pool *RoutinePool) ShutDown() {
+ pool.beforeCloseDispatchQueue()
+ pool.doCloseDispatchQueue()
+ for _, w := range pool.workers {
+ w.release()
+ }
+ pool.workers = nil
+ pool.taskQueue = nil
+ pool.dispatchQueue = nil
+}
+
+// NoChanPool defines the coroutine pool struct
+type NoChanPool struct {
+ basicPool
+ wg *sync.WaitGroup
+ tokens chan interface{}
+}
+
+// NewNochanPool creates a new NoChanPool instance
+func NewNochanPool(maxWorkerCnt int) Pool {
+ if maxWorkerCnt <= 0 {
+ maxWorkerCnt = runtime.NumCPU()
+ }
+
+ pool := &NoChanPool{
+ wg: new(sync.WaitGroup),
+ tokens: make(chan interface{}, maxWorkerCnt),
+ }
+ pool.isShutDown = 0
+ pool.AddMaxWorkerCnt(int64(maxWorkerCnt))
+
+ for i := 0; i < maxWorkerCnt; i++ {
+ pool.tokens <- struct{}{}
+ }
+
+ return pool
+}
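+
+// The tokens channel works as a counting semaphore: NewNochanPool fills it
+// with maxWorkerCnt tokens, acquire takes one before running a task and
+// release puts one back, so at most maxWorkerCnt tasks run concurrently.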
+
+func (pool *NoChanPool) acquire() {
+ <-pool.tokens
+}
+
+func (pool *NoChanPool) release() {
+ pool.tokens <- struct{}{}
+}
+
+func (pool *NoChanPool) execute(t Task) {
+ pool.wg.Add(1)
+ go func() {
+ pool.acquire()
+ defer func() {
+ pool.release()
+ pool.wg.Done()
+ }()
+ runTask(t)
+ }()
+}
+
+// ShutDown closes the NoChanPool instance
+func (pool *NoChanPool) ShutDown() {
+ if !atomic.CompareAndSwapInt32(&pool.isShutDown, 0, 1) {
+ return
+ }
+ pool.wg.Wait()
+}
+
+// Execute executes the specified task
+func (pool *NoChanPool) Execute(t Task) {
+ if t != nil {
+ pool.execute(t)
+ }
+}
+
+// ExecuteFunc creates a funcWrapper instance with the specified function and calls the Execute function
+func (pool *NoChanPool) ExecuteFunc(f func() interface{}) {
+ fw := &funcWrapper{
+ f: f,
+ }
+ pool.Execute(fw)
+}
+
+// Submit executes the specified task, and returns the FutureResult and error info
+func (pool *NoChanPool) Submit(t Task) (Future, error) {
+ if t == nil {
+ return nil, ErrTaskInvalid
+ }
+
+ f := &FutureResult{}
+ f.resultChan = make(chan interface{}, 1)
+ tw := &taskWrapper{
+ t: t,
+ f: f,
+ }
+
+ pool.execute(tw)
+ return f, nil
+}
+
+// SubmitFunc creates a funcWrapper instance with the specified function and calls the Submit function
+func (pool *NoChanPool) SubmitFunc(f func() interface{}) (Future, error) {
+ fw := &funcWrapper{
+ f: f,
+ }
+ return pool.Submit(fw)
+}
diff --git a/modules/obs/temporary.go b/modules/obs/temporary.go
new file mode 100755
index 000000000..bfaeb8197
--- /dev/null
+++ b/modules/obs/temporary.go
@@ -0,0 +1,790 @@
+// Copyright 2019 Huawei Technologies Co.,Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+//nolint:golint, unused
+package obs
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "strings"
+ "time"
+)
+
+// CreateSignedUrl creates signed url with the specified CreateSignedUrlInput, and returns the CreateSignedUrlOutput and error
+func (obsClient ObsClient) CreateSignedUrl(input *CreateSignedUrlInput) (output *CreateSignedUrlOutput, err error) {
+ if input == nil {
+ return nil, errors.New("CreateSignedUrlInput is nil")
+ }
+
+ params := make(map[string]string, len(input.QueryParams))
+ for key, value := range input.QueryParams {
+ params[key] = value
+ }
+
+ if input.SubResource != "" {
+ params[string(input.SubResource)] = ""
+ }
+
+ headers := make(map[string][]string, len(input.Headers))
+ for key, value := range input.Headers {
+ headers[key] = []string{value}
+ }
+
+ if input.Expires <= 0 {
+ input.Expires = 300
+ }
+
+ requestURL, err := obsClient.doAuthTemporary(string(input.Method), input.Bucket, input.Key, params, headers, int64(input.Expires))
+ if err != nil {
+ return nil, err
+ }
+
+ output = &CreateSignedUrlOutput{
+ SignedUrl: requestURL,
+ ActualSignedRequestHeaders: headers,
+ }
+ return
+}
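+
+// A minimal usage sketch (illustrative only; client, bucket and key values
+// are assumptions):
+//
+//    input := &CreateSignedUrlInput{
+//        Method:  HTTP_GET,
+//        Bucket:  "my-bucket",
+//        Key:     "my-object",
+//        Expires: 300,
+//    }
+//    output, err := client.CreateSignedUrl(input)
+//    if err == nil {
+//        // output.SignedUrl can be used directly, or passed to one of the
+//        // *WithSignedUrl helpers below together with
+//        // output.ActualSignedRequestHeaders.
+//    }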
+
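+// isSecurityToken adds the configured security token, if any, to the request
+// parameters under the key matching the signature type.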
+func (obsClient ObsClient) isSecurityToken(params map[string]string) {
+ if obsClient.conf.securityProvider.securityToken != "" {
+ if obsClient.conf.signature == SignatureObs {
+ params[HEADER_STS_TOKEN_OBS] = obsClient.conf.securityProvider.securityToken
+ } else {
+ params[HEADER_STS_TOKEN_AMZ] = obsClient.conf.securityProvider.securityToken
+ }
+ }
+}
+
+// CreateBrowserBasedSignature gets the browser based signature with the specified CreateBrowserBasedSignatureInput,
+// and returns the CreateBrowserBasedSignatureOutput and error
+func (obsClient ObsClient) CreateBrowserBasedSignature(input *CreateBrowserBasedSignatureInput) (output *CreateBrowserBasedSignatureOutput, err error) {
+ if input == nil {
+ return nil, errors.New("CreateBrowserBasedSignatureInput is nil")
+ }
+
+ params := make(map[string]string, len(input.FormParams))
+ for key, value := range input.FormParams {
+ params[key] = value
+ }
+
+ date := time.Now().UTC()
+ shortDate := date.Format(SHORT_DATE_FORMAT)
+ longDate := date.Format(LONG_DATE_FORMAT)
+
+ credential, _ := getCredential(obsClient.conf.securityProvider.ak, obsClient.conf.region, shortDate)
+
+ if input.Expires <= 0 {
+ input.Expires = 300
+ }
+
+ expiration := date.Add(time.Second * time.Duration(input.Expires)).Format(ISO8601_DATE_FORMAT)
+ if obsClient.conf.signature == SignatureV4 {
+ params[PARAM_ALGORITHM_AMZ_CAMEL] = V4_HASH_PREFIX
+ params[PARAM_CREDENTIAL_AMZ_CAMEL] = credential
+ params[PARAM_DATE_AMZ_CAMEL] = longDate
+ }
+
+ obsClient.isSecurityToken(params)
+
+ matchAnyBucket := true
+ matchAnyKey := true
+ count := 5
+ if bucket := strings.TrimSpace(input.Bucket); bucket != "" {
+ params["bucket"] = bucket
+ matchAnyBucket = false
+ count--
+ }
+
+ if key := strings.TrimSpace(input.Key); key != "" {
+ params["key"] = key
+ matchAnyKey = false
+ count--
+ }
+
+ originPolicySlice := make([]string, 0, len(params)+count)
+ originPolicySlice = append(originPolicySlice, fmt.Sprintf("{\"expiration\":\"%s\",", expiration))
+ originPolicySlice = append(originPolicySlice, "\"conditions\":[")
+ for key, value := range params {
+ if _key := strings.TrimSpace(strings.ToLower(key)); _key != "" {
+ originPolicySlice = append(originPolicySlice, fmt.Sprintf("{\"%s\":\"%s\"},", _key, value))
+ }
+ }
+
+ if matchAnyBucket {
+ originPolicySlice = append(originPolicySlice, "[\"starts-with\", \"$bucket\", \"\"],")
+ }
+
+ if matchAnyKey {
+ originPolicySlice = append(originPolicySlice, "[\"starts-with\", \"$key\", \"\"],")
+ }
+
+ originPolicySlice = append(originPolicySlice, "]}")
+
+ originPolicy := strings.Join(originPolicySlice, "")
+ policy := Base64Encode([]byte(originPolicy))
+ var signature string
+ if obsClient.conf.signature == SignatureV4 {
+ signature = getSignature(policy, obsClient.conf.securityProvider.sk, obsClient.conf.region, shortDate)
+ } else {
+ signature = Base64Encode(HmacSha1([]byte(obsClient.conf.securityProvider.sk), []byte(policy)))
+ }
+
+ output = &CreateBrowserBasedSignatureOutput{
+ OriginPolicy: originPolicy,
+ Policy: policy,
+ Algorithm: params[PARAM_ALGORITHM_AMZ_CAMEL],
+ Credential: params[PARAM_CREDENTIAL_AMZ_CAMEL],
+ Date: params[PARAM_DATE_AMZ_CAMEL],
+ Signature: signature,
+ }
+ return
+}
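+
+// The Policy and Signature in the output are intended for browser-based POST
+// uploads; the exact form-field names ("policy", "signature", and the
+// x-amz-* variants for V4) follow common S3/OBS POST conventions and are not
+// defined by this file.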
+
+// ListBucketsWithSignedUrl lists buckets with the specified signed url and signed request headers
+func (obsClient ObsClient) ListBucketsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListBucketsOutput, err error) {
+ output = &ListBucketsOutput{}
+ err = obsClient.doHTTPWithSignedURL("ListBuckets", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// CreateBucketWithSignedUrl creates bucket with the specified signed url and signed request headers and data
+func (obsClient ObsClient) CreateBucketWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHTTPWithSignedURL("CreateBucket", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// DeleteBucketWithSignedUrl deletes bucket with the specified signed url and signed request headers
+func (obsClient ObsClient) DeleteBucketWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHTTPWithSignedURL("DeleteBucket", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// SetBucketStoragePolicyWithSignedUrl sets bucket storage class with the specified signed url and signed request headers and data
+func (obsClient ObsClient) SetBucketStoragePolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHTTPWithSignedURL("SetBucketStoragePolicy", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetBucketStoragePolicyWithSignedUrl gets bucket storage class with the specified signed url and signed request headers
+func (obsClient ObsClient) GetBucketStoragePolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketStoragePolicyOutput, err error) {
+ output = &GetBucketStoragePolicyOutput{}
+ err = obsClient.doHTTPWithSignedURL("GetBucketStoragePolicy", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// ListObjectsWithSignedUrl lists objects in a bucket with the specified signed url and signed request headers
+func (obsClient ObsClient) ListObjectsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListObjectsOutput, err error) {
+ output = &ListObjectsOutput{}
+ err = obsClient.doHTTPWithSignedURL("ListObjects", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ } else {
+ if location, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok {
+ output.Location = location[0]
+ }
+ }
+ return
+}
+
+// ListVersionsWithSignedUrl lists versioning objects in a bucket with the specified signed url and signed request headers
+func (obsClient ObsClient) ListVersionsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListVersionsOutput, err error) {
+ output = &ListVersionsOutput{}
+ err = obsClient.doHTTPWithSignedURL("ListVersions", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ } else {
+ if location, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok {
+ output.Location = location[0]
+ }
+ }
+ return
+}
+
+// ListMultipartUploadsWithSignedUrl lists the multipart uploads that are initialized but not combined or aborted in a
+// specified bucket with the specified signed url and signed request headers
+func (obsClient ObsClient) ListMultipartUploadsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListMultipartUploadsOutput, err error) {
+ output = &ListMultipartUploadsOutput{}
+ err = obsClient.doHTTPWithSignedURL("ListMultipartUploads", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// SetBucketQuotaWithSignedUrl sets the bucket quota with the specified signed url and signed request headers and data
+func (obsClient ObsClient) SetBucketQuotaWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHTTPWithSignedURL("SetBucketQuota", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetBucketQuotaWithSignedUrl gets the bucket quota with the specified signed url and signed request headers
+func (obsClient ObsClient) GetBucketQuotaWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketQuotaOutput, err error) {
+ output = &GetBucketQuotaOutput{}
+ err = obsClient.doHTTPWithSignedURL("GetBucketQuota", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// HeadBucketWithSignedUrl checks whether a bucket exists with the specified signed url and signed request headers
+func (obsClient ObsClient) HeadBucketWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHTTPWithSignedURL("HeadBucket", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// HeadObjectWithSignedUrl checks whether an object exists with the specified signed url and signed request headers
+func (obsClient ObsClient) HeadObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHTTPWithSignedURL("HeadObject", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetBucketMetadataWithSignedUrl gets the metadata of a bucket with the specified signed url and signed request headers
+func (obsClient ObsClient) GetBucketMetadataWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketMetadataOutput, err error) {
+ output = &GetBucketMetadataOutput{}
+ err = obsClient.doHTTPWithSignedURL("GetBucketMetadata", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ } else {
+ ParseGetBucketMetadataOutput(output)
+ }
+ return
+}
+
+// GetBucketStorageInfoWithSignedUrl gets storage information about a bucket with the specified signed url and signed request headers
+func (obsClient ObsClient) GetBucketStorageInfoWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketStorageInfoOutput, err error) {
+ output = &GetBucketStorageInfoOutput{}
+ err = obsClient.doHTTPWithSignedURL("GetBucketStorageInfo", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetBucketLocationWithSignedUrl gets the location of a bucket with the specified signed url and signed request headers
+func (obsClient ObsClient) GetBucketLocationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketLocationOutput, err error) {
+ output = &GetBucketLocationOutput{}
+ err = obsClient.doHTTPWithSignedURL("GetBucketLocation", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// SetBucketAclWithSignedUrl sets the bucket ACL with the specified signed url and signed request headers and data
+func (obsClient ObsClient) SetBucketAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHTTPWithSignedURL("SetBucketAcl", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetBucketAclWithSignedUrl gets the bucket ACL with the specified signed url and signed request headers
+func (obsClient ObsClient) GetBucketAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketAclOutput, err error) {
+ output = &GetBucketAclOutput{}
+ err = obsClient.doHTTPWithSignedURL("GetBucketAcl", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// SetBucketPolicyWithSignedUrl sets the bucket policy with the specified signed url and signed request headers and data
+func (obsClient ObsClient) SetBucketPolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHTTPWithSignedURL("SetBucketPolicy", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetBucketPolicyWithSignedUrl gets the bucket policy with the specified signed url and signed request headers
+func (obsClient ObsClient) GetBucketPolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketPolicyOutput, err error) {
+ output = &GetBucketPolicyOutput{}
+ err = obsClient.doHTTPWithSignedURL("GetBucketPolicy", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, false)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// DeleteBucketPolicyWithSignedUrl deletes the bucket policy with the specified signed url and signed request headers
+func (obsClient ObsClient) DeleteBucketPolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHTTPWithSignedURL("DeleteBucketPolicy", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// SetBucketCorsWithSignedUrl sets CORS rules for a bucket with the specified signed url and signed request headers and data
+func (obsClient ObsClient) SetBucketCorsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHTTPWithSignedURL("SetBucketCors", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetBucketCorsWithSignedUrl gets CORS rules of a bucket with the specified signed url and signed request headers
+func (obsClient ObsClient) GetBucketCorsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketCorsOutput, err error) {
+ output = &GetBucketCorsOutput{}
+ err = obsClient.doHTTPWithSignedURL("GetBucketCors", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// DeleteBucketCorsWithSignedUrl deletes CORS rules of a bucket with the specified signed url and signed request headers
+func (obsClient ObsClient) DeleteBucketCorsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHTTPWithSignedURL("DeleteBucketCors", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// SetBucketVersioningWithSignedUrl sets the versioning status for a bucket with the specified signed url and signed request headers and data
+func (obsClient ObsClient) SetBucketVersioningWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHTTPWithSignedURL("SetBucketVersioning", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetBucketVersioningWithSignedUrl gets the versioning status of a bucket with the specified signed url and signed request headers
+func (obsClient ObsClient) GetBucketVersioningWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketVersioningOutput, err error) {
+ output = &GetBucketVersioningOutput{}
+ err = obsClient.doHTTPWithSignedURL("GetBucketVersioning", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// SetBucketWebsiteConfigurationWithSignedUrl sets website hosting for a bucket with the specified signed url and signed request headers and data
+func (obsClient ObsClient) SetBucketWebsiteConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHTTPWithSignedURL("SetBucketWebsiteConfiguration", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetBucketWebsiteConfigurationWithSignedUrl gets the website hosting settings of a bucket with the specified signed url and signed request headers
+func (obsClient ObsClient) GetBucketWebsiteConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketWebsiteConfigurationOutput, err error) {
+ output = &GetBucketWebsiteConfigurationOutput{}
+ err = obsClient.doHTTPWithSignedURL("GetBucketWebsiteConfiguration", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// DeleteBucketWebsiteConfigurationWithSignedUrl deletes the website hosting settings of a bucket with the specified signed url and signed request headers
+func (obsClient ObsClient) DeleteBucketWebsiteConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHTTPWithSignedURL("DeleteBucketWebsiteConfiguration", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// SetBucketLoggingConfigurationWithSignedUrl sets the bucket logging with the specified signed url and signed request headers and data
+func (obsClient ObsClient) SetBucketLoggingConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHTTPWithSignedURL("SetBucketLoggingConfiguration", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetBucketLoggingConfigurationWithSignedUrl gets the logging settings of a bucket with the specified signed url and signed request headers
+func (obsClient ObsClient) GetBucketLoggingConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketLoggingConfigurationOutput, err error) {
+ output = &GetBucketLoggingConfigurationOutput{}
+ err = obsClient.doHTTPWithSignedURL("GetBucketLoggingConfiguration", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// SetBucketLifecycleConfigurationWithSignedUrl sets lifecycle rules for a bucket with the specified signed url and signed request headers and data
+func (obsClient ObsClient) SetBucketLifecycleConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHTTPWithSignedURL("SetBucketLifecycleConfiguration", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetBucketLifecycleConfigurationWithSignedUrl gets lifecycle rules of a bucket with the specified signed url and signed request headers
+func (obsClient ObsClient) GetBucketLifecycleConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketLifecycleConfigurationOutput, err error) {
+ output = &GetBucketLifecycleConfigurationOutput{}
+ err = obsClient.doHTTPWithSignedURL("GetBucketLifecycleConfiguration", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// DeleteBucketLifecycleConfigurationWithSignedUrl deletes lifecycle rules of a bucket with the specified signed url and signed request headers
+func (obsClient ObsClient) DeleteBucketLifecycleConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHTTPWithSignedURL("DeleteBucketLifecycleConfiguration", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// SetBucketTaggingWithSignedUrl sets bucket tags with the specified signed url and signed request headers and data
+func (obsClient ObsClient) SetBucketTaggingWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHTTPWithSignedURL("SetBucketTagging", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetBucketTaggingWithSignedUrl gets bucket tags with the specified signed url and signed request headers
+func (obsClient ObsClient) GetBucketTaggingWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketTaggingOutput, err error) {
+ output = &GetBucketTaggingOutput{}
+ err = obsClient.doHTTPWithSignedURL("GetBucketTagging", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// DeleteBucketTaggingWithSignedUrl deletes bucket tags with the specified signed url and signed request headers
+func (obsClient ObsClient) DeleteBucketTaggingWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHTTPWithSignedURL("DeleteBucketTagging", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// SetBucketNotificationWithSignedUrl sets event notification for a bucket with the specified signed url and signed request headers and data
+func (obsClient ObsClient) SetBucketNotificationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHTTPWithSignedURL("SetBucketNotification", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetBucketNotificationWithSignedUrl gets event notification settings of a bucket with the specified signed url and signed request headers
+func (obsClient ObsClient) GetBucketNotificationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketNotificationOutput, err error) {
+ output = &GetBucketNotificationOutput{}
+ err = obsClient.doHTTPWithSignedURL("GetBucketNotification", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// DeleteObjectWithSignedUrl deletes an object with the specified signed url and signed request headers
+func (obsClient ObsClient) DeleteObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *DeleteObjectOutput, err error) {
+ output = &DeleteObjectOutput{}
+ err = obsClient.doHTTPWithSignedURL("DeleteObject", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ } else {
+ ParseDeleteObjectOutput(output)
+ }
+ return
+}
+
+// DeleteObjectsWithSignedUrl deletes objects in a batch with the specified signed url and signed request headers and data
+func (obsClient ObsClient) DeleteObjectsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *DeleteObjectsOutput, err error) {
+ output = &DeleteObjectsOutput{}
+ err = obsClient.doHTTPWithSignedURL("DeleteObjects", HTTP_POST, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// SetObjectAclWithSignedUrl sets ACL for an object with the specified signed url and signed request headers and data
+func (obsClient ObsClient) SetObjectAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHTTPWithSignedURL("SetObjectAcl", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetObjectAclWithSignedUrl gets the ACL of an object with the specified signed url and signed request headers
+func (obsClient ObsClient) GetObjectAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetObjectAclOutput, err error) {
+ output = &GetObjectAclOutput{}
+ err = obsClient.doHTTPWithSignedURL("GetObjectAcl", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ } else {
+ if versionID, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok {
+ output.VersionId = versionID[0]
+ }
+ }
+ return
+}
+
+// RestoreObjectWithSignedUrl restores an object with the specified signed url and signed request headers and data
+func (obsClient ObsClient) RestoreObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHTTPWithSignedURL("RestoreObject", HTTP_POST, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetObjectMetadataWithSignedUrl gets object metadata with the specified signed url and signed request headers
+func (obsClient ObsClient) GetObjectMetadataWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetObjectMetadataOutput, err error) {
+ output = &GetObjectMetadataOutput{}
+ err = obsClient.doHTTPWithSignedURL("GetObjectMetadata", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ } else {
+ ParseGetObjectMetadataOutput(output)
+ }
+ return
+}
+
+// GetObjectWithSignedUrl downloads an object with the specified signed url and signed request headers
+func (obsClient ObsClient) GetObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetObjectOutput, err error) {
+ output = &GetObjectOutput{}
+ err = obsClient.doHTTPWithSignedURL("GetObject", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ } else {
+ ParseGetObjectOutput(output)
+ }
+ return
+}
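+
+// Consumption sketch for GetObjectWithSignedUrl: the returned Body must be
+// closed by the caller; signedUrl and headers are assumed to come from a
+// prior signing step.
+//
+//	output, err := obsClient.GetObjectWithSignedUrl(signedUrl, headers)
+//	if err == nil {
+//		defer output.Body.Close()
+//		content, _ := ioutil.ReadAll(output.Body)
+//		_ = content
+//	}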
+
+// PutObjectWithSignedUrl uploads an object to the specified bucket with the specified signed url and signed request headers and data
+func (obsClient ObsClient) PutObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *PutObjectOutput, err error) {
+ output = &PutObjectOutput{}
+ err = obsClient.doHTTPWithSignedURL("PutObject", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ } else {
+ ParsePutObjectOutput(output)
+ }
+ return
+}
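+
+// Upload sketch: any io.Reader serves as the request body; here a
+// strings.Reader is used, with signedUrl/headers assumed to be pre-signed
+// for HTTP PUT.
+//
+//	output, err := obsClient.PutObjectWithSignedUrl(signedUrl, headers, strings.NewReader("hello obs"))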
+
+// PutFileWithSignedUrl uploads a file to the specified bucket with the specified signed url and signed request headers and sourceFile path
+func (obsClient ObsClient) PutFileWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, sourceFile string) (output *PutObjectOutput, err error) {
+ var data io.Reader
+ sourceFile = strings.TrimSpace(sourceFile)
+ if sourceFile != "" {
+ fd, _err := os.Open(sourceFile)
+ if _err != nil {
+ err = _err
+ return nil, err
+ }
+ defer func() {
+ errMsg := fd.Close()
+ if errMsg != nil {
+ doLog(LEVEL_WARN, "Failed to close file with reason: %v", errMsg)
+ }
+ }()
+
+ stat, _err := fd.Stat()
+ if _err != nil {
+ err = _err
+ return nil, err
+ }
+ fileReaderWrapper := &fileReaderWrapper{filePath: sourceFile}
+ fileReaderWrapper.reader = fd
+
+ var contentLength int64
+ if value, ok := actualSignedRequestHeaders[HEADER_CONTENT_LENGTH_CAMEL]; ok {
+ contentLength = StringToInt64(value[0], -1)
+ } else if value, ok := actualSignedRequestHeaders[HEADER_CONTENT_LENGTH]; ok {
+ contentLength = StringToInt64(value[0], -1)
+ } else {
+ contentLength = stat.Size()
+ }
+ if contentLength > stat.Size() {
+ return nil, errors.New("ContentLength is larger than fileSize")
+ }
+ fileReaderWrapper.totalCount = contentLength
+ data = fileReaderWrapper
+ }
+
+ output = &PutObjectOutput{}
+ err = obsClient.doHTTPWithSignedURL("PutObject", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ } else {
+ ParsePutObjectOutput(output)
+ }
+ return
+}
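+
+// Note on the Content-Length handling above: an explicit Content-Length in
+// the signed headers caps how many bytes are read from sourceFile and must
+// not exceed the file size. A hedged sketch (the 1024 value is a placeholder
+// and must match what was signed):
+//
+//	headers.Set("Content-Length", "1024") // upload only the first 1024 bytes
+//	output, err := obsClient.PutFileWithSignedUrl(signedUrl, headers, "/path/to/file")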
+
+// CopyObjectWithSignedUrl creates a copy of an existing object with the specified signed url and signed request headers
+func (obsClient ObsClient) CopyObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *CopyObjectOutput, err error) {
+ output = &CopyObjectOutput{}
+ err = obsClient.doHTTPWithSignedURL("CopyObject", HTTP_PUT, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ } else {
+ ParseCopyObjectOutput(output)
+ }
+ return
+}
+
+// AbortMultipartUploadWithSignedUrl aborts a multipart upload in a specified bucket by using the multipart upload ID with the specified signed url and signed request headers
+func (obsClient ObsClient) AbortMultipartUploadWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHTTPWithSignedURL("AbortMultipartUpload", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// InitiateMultipartUploadWithSignedUrl initializes a multipart upload with the specified signed url and signed request headers
+func (obsClient ObsClient) InitiateMultipartUploadWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *InitiateMultipartUploadOutput, err error) {
+ output = &InitiateMultipartUploadOutput{}
+ err = obsClient.doHTTPWithSignedURL("InitiateMultipartUpload", HTTP_POST, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ } else {
+ ParseInitiateMultipartUploadOutput(output)
+ }
+ return
+}
+
+// UploadPartWithSignedUrl uploads a part to a specified bucket by using a specified multipart upload ID
+// with the specified signed url and signed request headers and data
+func (obsClient ObsClient) UploadPartWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *UploadPartOutput, err error) {
+ output = &UploadPartOutput{}
+ err = obsClient.doHTTPWithSignedURL("UploadPart", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ } else {
+ ParseUploadPartOutput(output)
+ }
+ return
+}
+
+// CompleteMultipartUploadWithSignedUrl combines the uploaded parts in a specified bucket by using the multipart upload ID
+// with the specified signed url and signed request headers and data
+func (obsClient ObsClient) CompleteMultipartUploadWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *CompleteMultipartUploadOutput, err error) {
+ output = &CompleteMultipartUploadOutput{}
+ err = obsClient.doHTTPWithSignedURL("CompleteMultipartUpload", HTTP_POST, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ } else {
+ ParseCompleteMultipartUploadOutput(output)
+ }
+ return
+}
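+
+// End-to-end multipart sketch over signed URLs: each step needs a URL signed
+// for its own method and sub-resource; all identifiers below are placeholders.
+//
+//	initOut, _ := obsClient.InitiateMultipartUploadWithSignedUrl(initURL, initHeaders)
+//	partOut, _ := obsClient.UploadPartWithSignedUrl(partURL, partHeaders, strings.NewReader(partData))
+//	doneOut, _ := obsClient.CompleteMultipartUploadWithSignedUrl(doneURL, doneHeaders, doneBody)
+//	_, _, _ = initOut, partOut, doneOut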
+
+// ListPartsWithSignedUrl lists the uploaded parts in a bucket by using the multipart upload ID with the specified signed url and signed request headers
+func (obsClient ObsClient) ListPartsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListPartsOutput, err error) {
+ output = &ListPartsOutput{}
+ err = obsClient.doHTTPWithSignedURL("ListParts", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// CopyPartWithSignedUrl copies a part to a specified bucket by using a specified multipart upload ID with the specified signed url and signed request headers
+func (obsClient ObsClient) CopyPartWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *CopyPartOutput, err error) {
+ output = &CopyPartOutput{}
+ err = obsClient.doHTTPWithSignedURL("CopyPart", HTTP_PUT, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ } else {
+ ParseCopyPartOutput(output)
+ }
+ return
+}
+
+// SetBucketRequestPaymentWithSignedUrl sets the requester-pays setting for a bucket with the specified signed url and signed request headers and data
+func (obsClient ObsClient) SetBucketRequestPaymentWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHTTPWithSignedURL("SetBucketRequestPayment", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetBucketRequestPaymentWithSignedUrl gets the requester-pays setting of a bucket with the specified signed url and signed request headers
+func (obsClient ObsClient) GetBucketRequestPaymentWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketRequestPaymentOutput, err error) {
+ output = &GetBucketRequestPaymentOutput{}
+ err = obsClient.doHTTPWithSignedURL("GetBucketRequestPayment", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
diff --git a/modules/obs/trait.go b/modules/obs/trait.go
new file mode 100755
index 000000000..9a59d6a71
--- /dev/null
+++ b/modules/obs/trait.go
@@ -0,0 +1,909 @@
+// Copyright 2019 Huawei Technologies Co.,Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+//nolint:structcheck, unused
+//nolint:golint, unused
+package obs
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+// IReadCloser defines an interface with the setReadCloser function
+type IReadCloser interface {
+ setReadCloser(body io.ReadCloser)
+}
+
+func (output *GetObjectOutput) setReadCloser(body io.ReadCloser) {
+ output.Body = body
+}
+
+func setHeaders(headers map[string][]string, header string, headerValue []string, isObs bool) {
+ if isObs {
+ header = HEADER_PREFIX_OBS + header
+ headers[header] = headerValue
+ } else {
+ header = HEADER_PREFIX + header
+ headers[header] = headerValue
+ }
+}
+
+func setHeadersNext(headers map[string][]string, header string, headerNext string, headerValue []string, isObs bool) {
+ if isObs {
+ headers[header] = headerValue
+ } else {
+ headers[headerNext] = headerValue
+ }
+}
+
+// IBaseModel defines the interface for base response models
+type IBaseModel interface {
+ setStatusCode(statusCode int)
+
+ setRequestID(requestID string)
+
+ setResponseHeaders(responseHeaders map[string][]string)
+}
+
+// ISerializable defines an interface with the trans function
+type ISerializable interface {
+ trans(isObs bool) (map[string]string, map[string][]string, interface{}, error)
+}
+
+// DefaultSerializable defines the default serializable struct
+type DefaultSerializable struct {
+ params map[string]string
+ headers map[string][]string
+ data interface{}
+}
+
+func (input DefaultSerializable) trans(isObs bool) (map[string]string, map[string][]string, interface{}, error) {
+ return input.params, input.headers, input.data, nil
+}
+
+var defaultSerializable = &DefaultSerializable{}
+
+func newSubResourceSerial(subResource SubResourceType) *DefaultSerializable {
+ return &DefaultSerializable{map[string]string{string(subResource): ""}, nil, nil}
+}
+
+func trans(subResource SubResourceType, input interface{}) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ params = map[string]string{string(subResource): ""}
+ data, err = ConvertRequestToIoReader(input)
+ return
+}
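+
+// For instance, SetBucketQuotaInput later in this file delegates to this
+// helper; the call yields the sub-resource as an empty-valued query parameter
+// plus the input serialized into an io.Reader request body (sketch):
+//
+//	params, _, data, err := trans(SubResourceQuota, input) // params == {string(SubResourceQuota): ""}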
+
+func (baseModel *BaseModel) setStatusCode(statusCode int) {
+ baseModel.StatusCode = statusCode
+}
+
+func (baseModel *BaseModel) setRequestID(requestID string) {
+ baseModel.RequestId = requestID
+}
+
+func (baseModel *BaseModel) setResponseHeaders(responseHeaders map[string][]string) {
+ baseModel.ResponseHeaders = responseHeaders
+}
+
+func (input ListBucketsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ headers = make(map[string][]string)
+ if input.QueryLocation && !isObs {
+ setHeaders(headers, HEADER_LOCATION_AMZ, []string{"true"}, isObs)
+ }
+ return
+}
+
+func (input CreateBucketInput) prepareGrantHeaders(headers map[string][]string, isObs bool) {
+ if grantReadID := input.GrantReadId; grantReadID != "" {
+ setHeaders(headers, HEADER_GRANT_READ_OBS, []string{grantReadID}, isObs)
+ }
+ if grantWriteID := input.GrantWriteId; grantWriteID != "" {
+ setHeaders(headers, HEADER_GRANT_WRITE_OBS, []string{grantWriteID}, isObs)
+ }
+ if grantReadAcpID := input.GrantReadAcpId; grantReadAcpID != "" {
+ setHeaders(headers, HEADER_GRANT_READ_ACP_OBS, []string{grantReadAcpID}, isObs)
+ }
+ if grantWriteAcpID := input.GrantWriteAcpId; grantWriteAcpID != "" {
+ setHeaders(headers, HEADER_GRANT_WRITE_ACP_OBS, []string{grantWriteAcpID}, isObs)
+ }
+ if grantFullControlID := input.GrantFullControlId; grantFullControlID != "" {
+ setHeaders(headers, HEADER_GRANT_FULL_CONTROL_OBS, []string{grantFullControlID}, isObs)
+ }
+ if grantReadDeliveredID := input.GrantReadDeliveredId; grantReadDeliveredID != "" {
+ setHeaders(headers, HEADER_GRANT_READ_DELIVERED_OBS, []string{grantReadDeliveredID}, true)
+ }
+ if grantFullControlDeliveredID := input.GrantFullControlDeliveredId; grantFullControlDeliveredID != "" {
+ setHeaders(headers, HEADER_GRANT_FULL_CONTROL_DELIVERED_OBS, []string{grantFullControlDeliveredID}, true)
+ }
+}
+
+func (input CreateBucketInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ headers = make(map[string][]string)
+ if acl := string(input.ACL); acl != "" {
+ setHeaders(headers, HEADER_ACL, []string{acl}, isObs)
+ }
+ if storageClass := string(input.StorageClass); storageClass != "" {
+ if !isObs {
+ if storageClass == string(StorageClassWarm) {
+ storageClass = string(storageClassStandardIA)
+ } else if storageClass == string(StorageClassCold) {
+ storageClass = string(storageClassGlacier)
+ }
+ }
+ setHeadersNext(headers, HEADER_STORAGE_CLASS_OBS, HEADER_STORAGE_CLASS, []string{storageClass}, isObs)
+ }
+ if epid := input.Epid; epid != "" {
+ setHeaders(headers, HEADER_EPID_HEADERS, []string{epid}, isObs)
+ }
+ if availableZone := input.AvailableZone; availableZone != "" {
+ setHeaders(headers, HEADER_AZ_REDUNDANCY, []string{availableZone}, isObs)
+ }
+
+ input.prepareGrantHeaders(headers, isObs)
+ if location := strings.TrimSpace(input.Location); location != "" {
+ input.Location = location
+
+ xml := make([]string, 0, 3)
+ xml = append(xml, "<CreateBucketConfiguration>")
+ if isObs {
+ xml = append(xml, fmt.Sprintf("<Location>%s</Location>", input.Location))
+ } else {
+ xml = append(xml, fmt.Sprintf("<LocationConstraint>%s</LocationConstraint>", input.Location))
+ }
+ xml = append(xml, "</CreateBucketConfiguration>")
+
+ data = strings.Join(xml, "")
+ }
+ return
+}
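+
+// For reference, a non-empty Location yields a request body of the form
+// <CreateBucketConfiguration><Location>region</Location></CreateBucketConfiguration>
+// in OBS mode, with <LocationConstraint> replacing <Location> in
+// S3-compatible mode.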
+
+func (input SetBucketStoragePolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ xml := make([]string, 0, 1)
+ if !isObs {
+ storageClass := "STANDARD"
+ if input.StorageClass == StorageClassWarm {
+ storageClass = string(storageClassStandardIA)
+ } else if input.StorageClass == StorageClassCold {
+ storageClass = string(storageClassGlacier)
+ }
+ params = map[string]string{string(SubResourceStoragePolicy): ""}
+ xml = append(xml, fmt.Sprintf("<StoragePolicy><DefaultStorageClass>%s</DefaultStorageClass></StoragePolicy>", storageClass))
+ } else {
+ if input.StorageClass != StorageClassWarm && input.StorageClass != StorageClassCold {
+ input.StorageClass = StorageClassStandard
+ }
+ params = map[string]string{string(SubResourceStorageClass): ""}
+ xml = append(xml, fmt.Sprintf("<StorageClass>%s</StorageClass>", input.StorageClass))
+ }
+ data = strings.Join(xml, "")
+ return
+}
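+
+// The two modes differ in both sub-resource and payload (sketch, assuming the
+// sub-resource constants stringify to these query keys):
+// S3-compatible: PUT ?storagePolicy with
+// <StoragePolicy><DefaultStorageClass>STANDARD</DefaultStorageClass></StoragePolicy>
+// OBS: PUT ?storageClass with <StorageClass>STANDARD</StorageClass>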
+
+func (input ListObjsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ params = make(map[string]string)
+ if input.Prefix != "" {
+ params["prefix"] = input.Prefix
+ }
+ if input.Delimiter != "" {
+ params["delimiter"] = input.Delimiter
+ }
+ if input.MaxKeys > 0 {
+ params["max-keys"] = IntToString(input.MaxKeys)
+ }
+ headers = make(map[string][]string)
+ if origin := strings.TrimSpace(input.Origin); origin != "" {
+ headers[HEADER_ORIGIN_CAMEL] = []string{origin}
+ }
+ if requestHeader := strings.TrimSpace(input.RequestHeader); requestHeader != "" {
+ headers[HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL] = []string{requestHeader}
+ }
+ return
+}
+
+func (input ListObjectsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ params, headers, data, err = input.ListObjsInput.trans(isObs)
+ if err != nil {
+ return
+ }
+ if input.Marker != "" {
+ params["marker"] = input.Marker
+ }
+ return
+}
+
+func (input ListVersionsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ params, headers, data, err = input.ListObjsInput.trans(isObs)
+ if err != nil {
+ return
+ }
+ params[string(SubResourceVersions)] = ""
+ if input.KeyMarker != "" {
+ params["key-marker"] = input.KeyMarker
+ }
+ if input.VersionIdMarker != "" {
+ params["version-id-marker"] = input.VersionIdMarker
+ }
+ return
+}
+
+func (input ListMultipartUploadsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ params = map[string]string{string(SubResourceUploads): ""}
+ if input.Prefix != "" {
+ params["prefix"] = input.Prefix
+ }
+ if input.Delimiter != "" {
+ params["delimiter"] = input.Delimiter
+ }
+ if input.MaxUploads > 0 {
+ params["max-uploads"] = IntToString(input.MaxUploads)
+ }
+ if input.KeyMarker != "" {
+ params["key-marker"] = input.KeyMarker
+ }
+ if input.UploadIdMarker != "" {
+ params["upload-id-marker"] = input.UploadIdMarker
+ }
+ return
+}
+
+func (input SetBucketQuotaInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ return trans(SubResourceQuota, input)
+}
+
+func (input SetBucketAclInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ params = map[string]string{string(SubResourceAcl): ""}
+ headers = make(map[string][]string)
+
+ if acl := string(input.ACL); acl != "" {
+ setHeaders(headers, HEADER_ACL, []string{acl}, isObs)
+ } else {
+ data, _ = convertBucketACLToXML(input.AccessControlPolicy, false, isObs)
+ }
+ return
+}
+
+func (input SetBucketPolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ params = map[string]string{string(SubResourcePolicy): ""}
+ data = strings.NewReader(input.Policy)
+ return
+}
+
+func (input SetBucketCorsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ params = map[string]string{string(SubResourceCors): ""}
+ data, md5, err := ConvertRequestToIoReaderV2(input)
+ if err != nil {
+ return
+ }
+ headers = map[string][]string{HEADER_MD5_CAMEL: {md5}}
+ return
+}
+
+func (input SetBucketVersioningInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ return trans(SubResourceVersioning, input)
+}
+
+func (input SetBucketWebsiteConfigurationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ params = map[string]string{string(SubResourceWebsite): ""}
+ data, _ = ConvertWebsiteConfigurationToXml(input.BucketWebsiteConfiguration, false)
+ return
+}
+
+func (input GetBucketMetadataInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ headers = make(map[string][]string)
+ if origin := strings.TrimSpace(input.Origin); origin != "" {
+ headers[HEADER_ORIGIN_CAMEL] = []string{origin}
+ }
+ if requestHeader := strings.TrimSpace(input.RequestHeader); requestHeader != "" {
+ headers[HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL] = []string{requestHeader}
+ }
+ return
+}
+
+func (input SetBucketLoggingConfigurationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ params = map[string]string{string(SubResourceLogging): ""}
+ data, _ = ConvertLoggingStatusToXml(input.BucketLoggingStatus, false, isObs)
+ return
+}
+
+func (input SetBucketLifecycleConfigurationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ params = map[string]string{string(SubResourceLifecycle): ""}
+ data, md5 := ConvertLifecyleConfigurationToXml(input.BucketLifecyleConfiguration, true, isObs)
+ headers = map[string][]string{HEADER_MD5_CAMEL: {md5}}
+ return
+}
+
+func (input SetBucketTaggingInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ params = map[string]string{string(SubResourceTagging): ""}
+ data, md5, err := ConvertRequestToIoReaderV2(input)
+ if err != nil {
+ return
+ }
+ headers = map[string][]string{HEADER_MD5_CAMEL: {md5}}
+ return
+}
+
+func (input SetBucketNotificationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ params = map[string]string{string(SubResourceNotification): ""}
+ data, _ = ConvertNotificationToXml(input.BucketNotification, false, isObs)
+ return
+}
+
+func (input DeleteObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ params = make(map[string]string)
+ if input.VersionId != "" {
+ params[PARAM_VERSION_ID] = input.VersionId
+ }
+ return
+}
+
+func (input DeleteObjectsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ params = map[string]string{string(SubResourceDelete): ""}
+ data, md5, err := ConvertRequestToIoReaderV2(input)
+ if err != nil {
+ return
+ }
+ headers = map[string][]string{HEADER_MD5_CAMEL: {md5}}
+ return
+}
+
+func (input SetObjectAclInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ params = map[string]string{string(SubResourceAcl): ""}
+ if input.VersionId != "" {
+ params[PARAM_VERSION_ID] = input.VersionId
+ }
+ headers = make(map[string][]string)
+ if acl := string(input.ACL); acl != "" {
+ setHeaders(headers, HEADER_ACL, []string{acl}, isObs)
+ } else {
+ data, _ = ConvertAclToXml(input.AccessControlPolicy, false, isObs)
+ }
+ return
+}
+
+func (input GetObjectAclInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ params = map[string]string{string(SubResourceAcl): ""}
+ if input.VersionId != "" {
+ params[PARAM_VERSION_ID] = input.VersionId
+ }
+ return
+}
+
+func (input RestoreObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ params = map[string]string{string(SubResourceRestore): ""}
+ if input.VersionId != "" {
+ params[PARAM_VERSION_ID] = input.VersionId
+ }
+ if !isObs {
+ data, err = ConvertRequestToIoReader(input)
+ } else {
+ data = ConverntObsRestoreToXml(input)
+ }
+ return
+}
+
+// GetEncryption gets the Encryption field value from SseKmsHeader
+func (header SseKmsHeader) GetEncryption() string {
+ if header.Encryption != "" {
+ return header.Encryption
+ }
+ if !header.isObs {
+ return DEFAULT_SSE_KMS_ENCRYPTION
+ }
+ return DEFAULT_SSE_KMS_ENCRYPTION_OBS
+}
+
+// GetKey gets the Key field value from SseKmsHeader
+func (header SseKmsHeader) GetKey() string {
+ return header.Key
+}
+
+// GetEncryption gets the Encryption field value from SseCHeader
+func (header SseCHeader) GetEncryption() string {
+ if header.Encryption != "" {
+ return header.Encryption
+ }
+ return DEFAULT_SSE_C_ENCRYPTION
+}
+
+// GetKey gets the Key field value from SseCHeader
+func (header SseCHeader) GetKey() string {
+ return header.Key
+}
+
+// GetKeyMD5 gets the KeyMD5 field value from SseCHeader
+func (header SseCHeader) GetKeyMD5() string {
+ if header.KeyMD5 != "" {
+ return header.KeyMD5
+ }
+
+ if ret, err := Base64Decode(header.GetKey()); err == nil {
+ return Base64Md5(ret)
+ }
+ return ""
+}
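+
+// Derivation sketch: when KeyMD5 is unset it is computed from the decoded
+// key, so for a raw key the two expressions below agree (Base64Encode is
+// assumed to be this module's base64 helper).
+//
+//	h := SseCHeader{Key: Base64Encode(rawKey)}
+//	same := h.GetKeyMD5() == Base64Md5(rawKey) // true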
+
+func setSseHeader(headers map[string][]string, sseHeader ISseHeader, sseCOnly bool, isObs bool) {
+ if sseHeader != nil {
+ if sseCHeader, ok := sseHeader.(SseCHeader); ok {
+ setHeaders(headers, HEADER_SSEC_ENCRYPTION, []string{sseCHeader.GetEncryption()}, isObs)
+ setHeaders(headers, HEADER_SSEC_KEY, []string{sseCHeader.GetKey()}, isObs)
+ setHeaders(headers, HEADER_SSEC_KEY_MD5, []string{sseCHeader.GetKeyMD5()}, isObs)
+ } else if sseKmsHeader, ok := sseHeader.(SseKmsHeader); !sseCOnly && ok {
+ sseKmsHeader.isObs = isObs
+ setHeaders(headers, HEADER_SSEKMS_ENCRYPTION, []string{sseKmsHeader.GetEncryption()}, isObs)
+ if sseKmsHeader.GetKey() != "" {
+ setHeadersNext(headers, HEADER_SSEKMS_KEY_OBS, HEADER_SSEKMS_KEY_AMZ, []string{sseKmsHeader.GetKey()}, isObs)
+ }
+ }
+ }
+}
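+
+// Header-shaping sketch: for SSE-C in S3-compatible mode the call below fills
+// the three customer-key headers derived from the HEADER_SSEC_* constants
+// (encodedKey is a placeholder for a base64-encoded key).
+//
+//	headers := make(map[string][]string)
+//	setSseHeader(headers, SseCHeader{Key: encodedKey}, true, false)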
+
+func (input GetObjectMetadataInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ params = make(map[string]string)
+ if input.VersionId != "" {
+ params[PARAM_VERSION_ID] = input.VersionId
+ }
+ headers = make(map[string][]string)
+
+ if input.Origin != "" {
+ headers[HEADER_ORIGIN_CAMEL] = []string{input.Origin}
+ }
+
+ if input.RequestHeader != "" {
+ headers[HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL] = []string{input.RequestHeader}
+ }
+ setSseHeader(headers, input.SseHeader, true, isObs)
+ return
+}
+
+func (input SetObjectMetadataInput) prepareContentHeaders(headers map[string][]string) {
+ if input.ContentDisposition != "" {
+ headers[HEADER_CONTENT_DISPOSITION_CAMEL] = []string{input.ContentDisposition}
+ }
+ if input.ContentEncoding != "" {
+ headers[HEADER_CONTENT_ENCODING_CAMEL] = []string{input.ContentEncoding}
+ }
+ if input.ContentLanguage != "" {
+ headers[HEADER_CONTENT_LANGUAGE_CAMEL] = []string{input.ContentLanguage}
+ }
+
+ if input.ContentType != "" {
+ headers[HEADER_CONTENT_TYPE_CAML] = []string{input.ContentType}
+ }
+}
+
+func (input SetObjectMetadataInput) prepareStorageClass(headers map[string][]string, isObs bool) {
+ if storageClass := string(input.StorageClass); storageClass != "" {
+ if !isObs {
+ if storageClass == string(StorageClassWarm) {
+ storageClass = string(storageClassStandardIA)
+ } else if storageClass == string(StorageClassCold) {
+ storageClass = string(storageClassGlacier)
+ }
+ }
+ setHeaders(headers, HEADER_STORAGE_CLASS2, []string{storageClass}, isObs)
+ }
+}
+
+func (input SetObjectMetadataInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ params = map[string]string{string(SubResourceMetadata): ""}
+ if input.VersionId != "" {
+ params[PARAM_VERSION_ID] = input.VersionId
+ }
+ headers = make(map[string][]string)
+
+ if directive := string(input.MetadataDirective); directive != "" {
+ setHeaders(headers, HEADER_METADATA_DIRECTIVE, []string{string(input.MetadataDirective)}, isObs)
+ } else {
+ setHeaders(headers, HEADER_METADATA_DIRECTIVE, []string{string(ReplaceNew)}, isObs)
+ }
+ if input.CacheControl != "" {
+ headers[HEADER_CACHE_CONTROL_CAMEL] = []string{input.CacheControl}
+ }
+ input.prepareContentHeaders(headers)
+ if input.Expires != "" {
+ headers[HEADER_EXPIRES_CAMEL] = []string{input.Expires}
+ }
+ if input.WebsiteRedirectLocation != "" {
+ setHeaders(headers, HEADER_WEBSITE_REDIRECT_LOCATION, []string{input.WebsiteRedirectLocation}, isObs)
+ }
+ input.prepareStorageClass(headers, isObs)
+ if input.Metadata != nil {
+ for key, value := range input.Metadata {
+ key = strings.TrimSpace(key)
+ setHeadersNext(headers, HEADER_PREFIX_META_OBS+key, HEADER_PREFIX_META+key, []string{value}, isObs)
+ }
+ }
+ return
+}
+
+func (input GetObjectInput) prepareResponseParams(params map[string]string) {
+ if input.ResponseCacheControl != "" {
+ params[PARAM_RESPONSE_CACHE_CONTROL] = input.ResponseCacheControl
+ }
+ if input.ResponseContentDisposition != "" {
+ params[PARAM_RESPONSE_CONTENT_DISPOSITION] = input.ResponseContentDisposition
+ }
+ if input.ResponseContentEncoding != "" {
+ params[PARAM_RESPONSE_CONTENT_ENCODING] = input.ResponseContentEncoding
+ }
+ if input.ResponseContentLanguage != "" {
+ params[PARAM_RESPONSE_CONTENT_LANGUAGE] = input.ResponseContentLanguage
+ }
+ if input.ResponseContentType != "" {
+ params[PARAM_RESPONSE_CONTENT_TYPE] = input.ResponseContentType
+ }
+ if input.ResponseExpires != "" {
+ params[PARAM_RESPONSE_EXPIRES] = input.ResponseExpires
+ }
+}
+
+func (input GetObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ params, headers, data, err = input.GetObjectMetadataInput.trans(isObs)
+ if err != nil {
+ return
+ }
+ input.prepareResponseParams(params)
+ if input.ImageProcess != "" {
+ params[PARAM_IMAGE_PROCESS] = input.ImageProcess
+ }
+ if input.RangeStart >= 0 && input.RangeEnd > input.RangeStart {
+ headers[HEADER_RANGE] = []string{fmt.Sprintf("bytes=%d-%d", input.RangeStart, input.RangeEnd)}
+ }
+
+ if input.IfMatch != "" {
+ headers[HEADER_IF_MATCH] = []string{input.IfMatch}
+ }
+ if input.IfNoneMatch != "" {
+ headers[HEADER_IF_NONE_MATCH] = []string{input.IfNoneMatch}
+ }
+ if !input.IfModifiedSince.IsZero() {
+ headers[HEADER_IF_MODIFIED_SINCE] = []string{FormatUtcToRfc1123(input.IfModifiedSince)}
+ }
+ if !input.IfUnmodifiedSince.IsZero() {
+ headers[HEADER_IF_UNMODIFIED_SINCE] = []string{FormatUtcToRfc1123(input.IfUnmodifiedSince)}
+ }
+ return
+}
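+
+// Range sketch: a valid start/end pair becomes a single Range header.
+//
+//	input := &GetObjectInput{}
+//	input.RangeStart, input.RangeEnd = 0, 1023
+//	_, headers, _, _ := input.trans(false) // headers[HEADER_RANGE] == []string{"bytes=0-1023"}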
+
+func (input ObjectOperationInput) prepareGrantHeaders(headers map[string][]string) {
+ if GrantReadID := input.GrantReadId; GrantReadID != "" {
+ setHeaders(headers, HEADER_GRANT_READ_OBS, []string{GrantReadID}, true)
+ }
+ if GrantReadAcpID := input.GrantReadAcpId; GrantReadAcpID != "" {
+ setHeaders(headers, HEADER_GRANT_READ_ACP_OBS, []string{GrantReadAcpID}, true)
+ }
+ if GrantWriteAcpID := input.GrantWriteAcpId; GrantWriteAcpID != "" {
+ setHeaders(headers, HEADER_GRANT_WRITE_ACP_OBS, []string{GrantWriteAcpID}, true)
+ }
+ if GrantFullControlID := input.GrantFullControlId; GrantFullControlID != "" {
+ setHeaders(headers, HEADER_GRANT_FULL_CONTROL_OBS, []string{GrantFullControlID}, true)
+ }
+}
+
+func (input ObjectOperationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ headers = make(map[string][]string)
+ params = make(map[string]string)
+ if acl := string(input.ACL); acl != "" {
+ setHeaders(headers, HEADER_ACL, []string{acl}, isObs)
+ }
+ input.prepareGrantHeaders(headers)
+ if storageClass := string(input.StorageClass); storageClass != "" {
+ if !isObs {
+ if storageClass == string(StorageClassWarm) {
+ storageClass = string(storageClassStandardIA)
+ } else if storageClass == string(StorageClassCold) {
+ storageClass = string(storageClassGlacier)
+ }
+ }
+ setHeaders(headers, HEADER_STORAGE_CLASS2, []string{storageClass}, isObs)
+ }
+ if input.WebsiteRedirectLocation != "" {
+ setHeaders(headers, HEADER_WEBSITE_REDIRECT_LOCATION, []string{input.WebsiteRedirectLocation}, isObs)
+ }
+ setSseHeader(headers, input.SseHeader, false, isObs)
+ if input.Expires != 0 {
+ setHeaders(headers, HEADER_EXPIRES, []string{Int64ToString(input.Expires)}, true)
+ }
+ if input.Metadata != nil {
+ for key, value := range input.Metadata {
+ key = strings.TrimSpace(key)
+ setHeadersNext(headers, HEADER_PREFIX_META_OBS+key, HEADER_PREFIX_META+key, []string{value}, isObs)
+ }
+ }
+ return
+}
+
+func (input PutObjectBasicInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ params, headers, data, err = input.ObjectOperationInput.trans(isObs)
+ if err != nil {
+ return
+ }
+
+ if input.ContentMD5 != "" {
+ headers[HEADER_MD5_CAMEL] = []string{input.ContentMD5}
+ }
+
+ if input.ContentLength > 0 {
+ headers[HEADER_CONTENT_LENGTH_CAMEL] = []string{Int64ToString(input.ContentLength)}
+ }
+ if input.ContentType != "" {
+ headers[HEADER_CONTENT_TYPE_CAML] = []string{input.ContentType}
+ }
+
+ return
+}
+
+func (input PutObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ params, headers, data, err = input.PutObjectBasicInput.trans(isObs)
+ if err != nil {
+ return
+ }
+ if input.Body != nil {
+ data = input.Body
+ }
+ return
+}
+
+func (input CopyObjectInput) prepareReplaceHeaders(headers map[string][]string) {
+ if input.CacheControl != "" {
+ headers[HEADER_CACHE_CONTROL] = []string{input.CacheControl}
+ }
+ if input.ContentDisposition != "" {
+ headers[HEADER_CONTENT_DISPOSITION] = []string{input.ContentDisposition}
+ }
+ if input.ContentEncoding != "" {
+ headers[HEADER_CONTENT_ENCODING] = []string{input.ContentEncoding}
+ }
+ if input.ContentLanguage != "" {
+ headers[HEADER_CONTENT_LANGUAGE] = []string{input.ContentLanguage}
+ }
+ if input.ContentType != "" {
+ headers[HEADER_CONTENT_TYPE] = []string{input.ContentType}
+ }
+ if input.Expires != "" {
+ headers[HEADER_EXPIRES] = []string{input.Expires}
+ }
+}
+
+func (input CopyObjectInput) prepareCopySourceHeaders(headers map[string][]string, isObs bool) {
+ if input.CopySourceIfMatch != "" {
+ setHeaders(headers, HEADER_COPY_SOURCE_IF_MATCH, []string{input.CopySourceIfMatch}, isObs)
+ }
+ if input.CopySourceIfNoneMatch != "" {
+ setHeaders(headers, HEADER_COPY_SOURCE_IF_NONE_MATCH, []string{input.CopySourceIfNoneMatch}, isObs)
+ }
+ if !input.CopySourceIfModifiedSince.IsZero() {
+ setHeaders(headers, HEADER_COPY_SOURCE_IF_MODIFIED_SINCE, []string{FormatUtcToRfc1123(input.CopySourceIfModifiedSince)}, isObs)
+ }
+ if !input.CopySourceIfUnmodifiedSince.IsZero() {
+ setHeaders(headers, HEADER_COPY_SOURCE_IF_UNMODIFIED_SINCE, []string{FormatUtcToRfc1123(input.CopySourceIfUnmodifiedSince)}, isObs)
+ }
+}
+
+func (input CopyObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ params, headers, data, err = input.ObjectOperationInput.trans(isObs)
+ if err != nil {
+ return
+ }
+
+ var copySource string
+ if input.CopySourceVersionId != "" {
+ copySource = fmt.Sprintf("%s/%s?versionId=%s", input.CopySourceBucket, UrlEncode(input.CopySourceKey, false), input.CopySourceVersionId)
+ } else {
+ copySource = fmt.Sprintf("%s/%s", input.CopySourceBucket, UrlEncode(input.CopySourceKey, false))
+ }
+ setHeaders(headers, HEADER_COPY_SOURCE, []string{copySource}, isObs)
+
+ if directive := string(input.MetadataDirective); directive != "" {
+ setHeaders(headers, HEADER_METADATA_DIRECTIVE, []string{directive}, isObs)
+ }
+
+ if input.MetadataDirective == ReplaceMetadata {
+ input.prepareReplaceHeaders(headers)
+ }
+
+ input.prepareCopySourceHeaders(headers, isObs)
+ if input.SourceSseHeader != nil {
+ if sseCHeader, ok := input.SourceSseHeader.(SseCHeader); ok {
+ setHeaders(headers, HEADER_SSEC_COPY_SOURCE_ENCRYPTION, []string{sseCHeader.GetEncryption()}, isObs)
+ setHeaders(headers, HEADER_SSEC_COPY_SOURCE_KEY, []string{sseCHeader.GetKey()}, isObs)
+ setHeaders(headers, HEADER_SSEC_COPY_SOURCE_KEY_MD5, []string{sseCHeader.GetKeyMD5()}, isObs)
+ }
+ }
+ if input.SuccessActionRedirect != "" {
+ headers[HEADER_SUCCESS_ACTION_REDIRECT] = []string{input.SuccessActionRedirect}
+ }
+ return
+}
+
+func (input AbortMultipartUploadInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ params = map[string]string{"uploadId": input.UploadId}
+ return
+}
+
+func (input InitiateMultipartUploadInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ params, headers, data, err = input.ObjectOperationInput.trans(isObs)
+ if err != nil {
+ return
+ }
+ if input.ContentType != "" {
+ headers[HEADER_CONTENT_TYPE_CAML] = []string{input.ContentType}
+ }
+ params[string(SubResourceUploads)] = ""
+ return
+}
+
+func (input UploadPartInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ params = map[string]string{"uploadId": input.UploadId, "partNumber": IntToString(input.PartNumber)}
+ headers = make(map[string][]string)
+ setSseHeader(headers, input.SseHeader, true, isObs)
+ if input.ContentMD5 != "" {
+ headers[HEADER_MD5_CAMEL] = []string{input.ContentMD5}
+ }
+ if input.Body != nil {
+ data = input.Body
+ }
+ return
+}
+
+func (input CompleteMultipartUploadInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ params = map[string]string{"uploadId": input.UploadId}
+ data, _ = ConvertCompleteMultipartUploadInputToXml(input, false)
+ return
+}
+
+func (input ListPartsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ params = map[string]string{"uploadId": input.UploadId}
+ if input.MaxParts > 0 {
+ params["max-parts"] = IntToString(input.MaxParts)
+ }
+ if input.PartNumberMarker > 0 {
+ params["part-number-marker"] = IntToString(input.PartNumberMarker)
+ }
+ return
+}
+
+func (input CopyPartInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ params = map[string]string{"uploadId": input.UploadId, "partNumber": IntToString(input.PartNumber)}
+ headers = make(map[string][]string, 1)
+ var copySource string
+ if input.CopySourceVersionId != "" {
+ copySource = fmt.Sprintf("%s/%s?versionId=%s", input.CopySourceBucket, UrlEncode(input.CopySourceKey, false), input.CopySourceVersionId)
+ } else {
+ copySource = fmt.Sprintf("%s/%s", input.CopySourceBucket, UrlEncode(input.CopySourceKey, false))
+ }
+ setHeaders(headers, HEADER_COPY_SOURCE, []string{copySource}, isObs)
+ if input.CopySourceRangeStart >= 0 && input.CopySourceRangeEnd > input.CopySourceRangeStart {
+ setHeaders(headers, HEADER_COPY_SOURCE_RANGE, []string{fmt.Sprintf("bytes=%d-%d", input.CopySourceRangeStart, input.CopySourceRangeEnd)}, isObs)
+ }
+
+ setSseHeader(headers, input.SseHeader, true, isObs)
+ if input.SourceSseHeader != nil {
+ if sseCHeader, ok := input.SourceSseHeader.(SseCHeader); ok {
+ setHeaders(headers, HEADER_SSEC_COPY_SOURCE_ENCRYPTION, []string{sseCHeader.GetEncryption()}, isObs)
+ setHeaders(headers, HEADER_SSEC_COPY_SOURCE_KEY, []string{sseCHeader.GetKey()}, isObs)
+ setHeaders(headers, HEADER_SSEC_COPY_SOURCE_KEY_MD5, []string{sseCHeader.GetKeyMD5()}, isObs)
+ }
+ }
+ return
+}
+
+func (input HeadObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ params = make(map[string]string)
+ if input.VersionId != "" {
+ params[PARAM_VERSION_ID] = input.VersionId
+ }
+ return
+}
+
+func (input SetBucketRequestPaymentInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ return trans(SubResourceRequestPayment, input)
+}
+
+type partSlice []Part
+
+func (parts partSlice) Len() int {
+ return len(parts)
+}
+
+func (parts partSlice) Less(i, j int) bool {
+ return parts[i].PartNumber < parts[j].PartNumber
+}
+
+func (parts partSlice) Swap(i, j int) {
+ parts[i], parts[j] = parts[j], parts[i]
+}
+
+type readerWrapper struct {
+ reader io.Reader
+ mark int64
+ totalCount int64
+ readedCount int64
+}
+
+func (rw *readerWrapper) seek(offset int64, whence int) (int64, error) {
+ if r, ok := rw.reader.(*strings.Reader); ok {
+ return r.Seek(offset, whence)
+ } else if r, ok := rw.reader.(*bytes.Reader); ok {
+ return r.Seek(offset, whence)
+ } else if r, ok := rw.reader.(*os.File); ok {
+ return r.Seek(offset, whence)
+ }
+ return offset, nil
+}
+
+func (rw *readerWrapper) Read(p []byte) (n int, err error) {
+ if rw.totalCount == 0 {
+ return 0, io.EOF
+ }
+ if rw.totalCount > 0 {
+ n, err = rw.reader.Read(p)
+ readedOnce := int64(n)
+ remainCount := rw.totalCount - rw.readedCount
+ if remainCount > readedOnce {
+ rw.readedCount += readedOnce
+ return n, err
+ }
+ rw.readedCount += remainCount
+ return int(remainCount), io.EOF
+ }
+ return rw.reader.Read(p)
+}
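+
+// Truncation sketch: totalCount caps how many bytes the wrapper exposes, so a
+// wrapper over an 11-byte string with totalCount 5 reports EOF after 5 bytes.
+//
+//	rw := &readerWrapper{reader: strings.NewReader("hello world"), totalCount: 5}
+//	n, err := rw.Read(make([]byte, 16)) // n == 5, err == io.EOF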
+
+type fileReaderWrapper struct {
+ readerWrapper
+ filePath string
+}
+
+func (input SetBucketFetchPolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ contentType := mimeTypes["json"]
+ headers = make(map[string][]string, 2)
+ headers[HEADER_CONTENT_TYPE] = []string{contentType}
+ setHeaders(headers, headerOefMarker, []string{"yes"}, isObs)
+ data, err = convertFetchPolicyToJSON(input)
+ return
+}
+
+func (input GetBucketFetchPolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ headers = make(map[string][]string, 1)
+ setHeaders(headers, headerOefMarker, []string{"yes"}, isObs)
+ return
+}
+
+func (input DeleteBucketFetchPolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ headers = make(map[string][]string, 1)
+ setHeaders(headers, headerOefMarker, []string{"yes"}, isObs)
+ return
+}
+
+func (input SetBucketFetchJobInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ contentType := mimeTypes["json"]
+ headers = make(map[string][]string, 2)
+ headers[HEADER_CONTENT_TYPE] = []string{contentType}
+ setHeaders(headers, headerOefMarker, []string{"yes"}, isObs)
+ data, err = convertFetchJobToJSON(input)
+ return
+}
+
+func (input GetBucketFetchJobInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
+ headers = make(map[string][]string, 1)
+ setHeaders(headers, headerOefMarker, []string{"yes"}, isObs)
+ return
+}
diff --git a/modules/obs/transfer.go b/modules/obs/transfer.go
new file mode 100755
index 000000000..4dc50c0f9
--- /dev/null
+++ b/modules/obs/transfer.go
@@ -0,0 +1,873 @@
+// Copyright 2019 Huawei Technologies Co.,Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+//nolint:golint, unused
+package obs
+
+import (
+ "bufio"
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+ "sync/atomic"
+ "syscall"
+)
+
+var errAbort = errors.New("AbortError")
+
+// FileStatus defines the upload file properties
+type FileStatus struct {
+ XMLName xml.Name `xml:"FileInfo"`
+ LastModified int64 `xml:"LastModified"`
+ Size int64 `xml:"Size"`
+}
+
+// UploadPartInfo defines the upload part properties
+type UploadPartInfo struct {
+ XMLName xml.Name `xml:"UploadPart"`
+ PartNumber int `xml:"PartNumber"`
+ Etag string `xml:"Etag"`
+ PartSize int64 `xml:"PartSize"`
+ Offset int64 `xml:"Offset"`
+ IsCompleted bool `xml:"IsCompleted"`
+}
+
+// UploadCheckpoint defines the upload checkpoint file properties
+type UploadCheckpoint struct {
+ XMLName xml.Name `xml:"UploadFileCheckpoint"`
+ Bucket string `xml:"Bucket"`
+ Key string `xml:"Key"`
+ UploadId string `xml:"UploadId,omitempty"`
+ UploadFile string `xml:"FileUrl"`
+ FileInfo FileStatus `xml:"FileInfo"`
+ UploadParts []UploadPartInfo `xml:"UploadParts>UploadPart"`
+}
+
+func (ufc *UploadCheckpoint) isValid(bucket, key, uploadFile string, fileStat os.FileInfo) bool {
+ if ufc.Bucket != bucket || ufc.Key != key || ufc.UploadFile != uploadFile {
+ doLog(LEVEL_INFO, "Checkpoint file is invalid, the bucketName or objectKey or uploadFile was changed. clear the record.")
+ return false
+ }
+
+ if ufc.FileInfo.Size != fileStat.Size() || ufc.FileInfo.LastModified != fileStat.ModTime().Unix() {
+ doLog(LEVEL_INFO, "Checkpoint file is invalid, the uploadFile was changed. clear the record.")
+ return false
+ }
+
+ if ufc.UploadId == "" {
+ doLog(LEVEL_INFO, "UploadId is invalid. clear the record.")
+ return false
+ }
+
+ return true
+}
+
+type uploadPartTask struct {
+ UploadPartInput
+ obsClient *ObsClient
+ abort *int32
+ extensions []extensionOptions
+ enableCheckpoint bool
+}
+
+func (task *uploadPartTask) Run() interface{} {
+ if atomic.LoadInt32(task.abort) == 1 {
+ return errAbort
+ }
+
+ input := &UploadPartInput{}
+ input.Bucket = task.Bucket
+ input.Key = task.Key
+ input.PartNumber = task.PartNumber
+ input.UploadId = task.UploadId
+ input.SseHeader = task.SseHeader
+ input.SourceFile = task.SourceFile
+ input.Offset = task.Offset
+ input.PartSize = task.PartSize
+ extensions := task.extensions
+
+ var output *UploadPartOutput
+ var err error
+ if extensions != nil {
+ output, err = task.obsClient.UploadPart(input, extensions...)
+ } else {
+ output, err = task.obsClient.UploadPart(input)
+ }
+
+ if err == nil {
+ if output.ETag == "" {
+ doLog(LEVEL_WARN, "Get invalid etag value after uploading part [%d].", task.PartNumber)
+ if !task.enableCheckpoint {
+ atomic.CompareAndSwapInt32(task.abort, 0, 1)
+ doLog(LEVEL_WARN, "Task is aborted, part number is [%d]", task.PartNumber)
+ }
+ return fmt.Errorf("get invalid etag value after uploading part [%d]", task.PartNumber)
+ }
+ return output
+ } else if obsError, ok := err.(ObsError); ok && obsError.StatusCode >= 400 && obsError.StatusCode < 500 {
+ atomic.CompareAndSwapInt32(task.abort, 0, 1)
+ doLog(LEVEL_WARN, "Task is aborted, part number is [%d]", task.PartNumber)
+ }
+ return err
+}
+
+func loadCheckpointFile(checkpointFile string, result interface{}) error {
+ ret, err := ioutil.ReadFile(checkpointFile)
+ if err != nil {
+ return err
+ }
+ if len(ret) == 0 {
+ return nil
+ }
+ return xml.Unmarshal(ret, result)
+}
+
+func updateCheckpointFile(fc interface{}, checkpointFilePath string) error {
+ result, err := xml.Marshal(fc)
+ if err != nil {
+ return err
+ }
+ err = ioutil.WriteFile(checkpointFilePath, result, 0666)
+ return err
+}
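+
+// For reference, the checkpoint file written here is plain XML produced by
+// xml.Marshal from the structs above; an upload checkpoint looks roughly
+// like this (illustrative, fields abridged):
+//
+//	<UploadFileCheckpoint>
+//	  <Bucket>my-bucket</Bucket>
+//	  <Key>my-object</Key>
+//	  <UploadId>...</UploadId>
+//	  <FileUrl>/tmp/big.bin</FileUrl>
+//	  <UploadParts><UploadPart>...</UploadPart></UploadParts>
+//	</UploadFileCheckpoint>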
+
+func getCheckpointFile(ufc *UploadCheckpoint, uploadFileStat os.FileInfo, input *UploadFileInput, obsClient *ObsClient, extensions []extensionOptions) (needCheckpoint bool, err error) {
+ checkpointFilePath := input.CheckpointFile
+ checkpointFileStat, err := os.Stat(checkpointFilePath)
+ if err != nil {
+ doLog(LEVEL_DEBUG, fmt.Sprintf("Stat checkpoint file failed with error: [%v].", err))
+ return true, nil
+ }
+ if checkpointFileStat.IsDir() {
+ doLog(LEVEL_ERROR, "Checkpoint file can not be a folder.")
+ return false, errors.New("checkpoint file can not be a folder")
+ }
+ err = loadCheckpointFile(checkpointFilePath, ufc)
+ if err != nil {
+ doLog(LEVEL_WARN, fmt.Sprintf("Load checkpoint file failed with error: [%v].", err))
+ return true, nil
+ } else if !ufc.isValid(input.Bucket, input.Key, input.UploadFile, uploadFileStat) {
+ if ufc.Bucket != "" && ufc.Key != "" && ufc.UploadId != "" {
+ _err := abortTask(ufc.Bucket, ufc.Key, ufc.UploadId, obsClient, extensions)
+ if _err != nil {
+ doLog(LEVEL_WARN, "Failed to abort upload task [%s].", ufc.UploadId)
+ }
+ }
+ _err := os.Remove(checkpointFilePath)
+ if _err != nil {
+ doLog(LEVEL_WARN, fmt.Sprintf("Failed to remove checkpoint file with error: [%v].", _err))
+ }
+ } else {
+ return false, nil
+ }
+
+ return true, nil
+}
+
+func prepareUpload(ufc *UploadCheckpoint, uploadFileStat os.FileInfo, input *UploadFileInput, obsClient *ObsClient, extensions []extensionOptions) error {
+ initiateInput := &InitiateMultipartUploadInput{}
+ initiateInput.ObjectOperationInput = input.ObjectOperationInput
+ initiateInput.ContentType = input.ContentType
+ var output *InitiateMultipartUploadOutput
+ var err error
+ if extensions != nil {
+ output, err = obsClient.InitiateMultipartUpload(initiateInput, extensions...)
+ } else {
+ output, err = obsClient.InitiateMultipartUpload(initiateInput)
+ }
+ if err != nil {
+ return err
+ }
+
+ ufc.Bucket = input.Bucket
+ ufc.Key = input.Key
+ ufc.UploadFile = input.UploadFile
+ ufc.FileInfo = FileStatus{}
+ ufc.FileInfo.Size = uploadFileStat.Size()
+ ufc.FileInfo.LastModified = uploadFileStat.ModTime().Unix()
+ ufc.UploadId = output.UploadId
+
+ err = sliceFile(input.PartSize, ufc)
+ return err
+}
+
+func sliceFile(partSize int64, ufc *UploadCheckpoint) error {
+ fileSize := ufc.FileInfo.Size
+ cnt := fileSize / partSize
+ if cnt >= 10000 {
+ partSize = fileSize / 10000
+ if fileSize%10000 != 0 {
+ partSize++
+ }
+ cnt = fileSize / partSize
+ }
+ if fileSize%partSize != 0 {
+ cnt++
+ }
+
+ if partSize > MAX_PART_SIZE {
+ doLog(LEVEL_ERROR, "The source upload file is too large")
+ return fmt.Errorf("The source upload file is too large")
+ }
+
+ if cnt == 0 {
+ uploadPart := UploadPartInfo{}
+ uploadPart.PartNumber = 1
+ ufc.UploadParts = []UploadPartInfo{uploadPart}
+ } else {
+ uploadParts := make([]UploadPartInfo, 0, cnt)
+ var i int64
+ for i = 0; i < cnt; i++ {
+ uploadPart := UploadPartInfo{}
+ uploadPart.PartNumber = int(i) + 1
+ uploadPart.PartSize = partSize
+ uploadPart.Offset = i * partSize
+ uploadParts = append(uploadParts, uploadPart)
+ }
+ if value := fileSize % partSize; value != 0 {
+ uploadParts[cnt-1].PartSize = value
+ }
+ ufc.UploadParts = uploadParts
+ }
+ return nil
+}
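+
+// Worked example for sliceFile (illustrative numbers): fileSize=100 with
+// partSize=9 gives cnt=11 plus a remainder, so cnt becomes 12; parts 1..12
+// get PartSize=9 at Offset=i*9, and the final part is shrunk to the
+// remainder, PartSize=1. When cnt would reach 10000, partSize is first
+// raised to ceil(fileSize/10000) so the part count stays within the limit.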
+
+func abortTask(bucket, key, uploadID string, obsClient *ObsClient, extensions []extensionOptions) error {
+ input := &AbortMultipartUploadInput{}
+ input.Bucket = bucket
+ input.Key = key
+ input.UploadId = uploadID
+ if extensions != nil {
+ _, err := obsClient.AbortMultipartUpload(input, extensions...)
+ return err
+ }
+ _, err := obsClient.AbortMultipartUpload(input)
+ return err
+}
+
+func handleUploadFileResult(uploadPartError error, ufc *UploadCheckpoint, enableCheckpoint bool, obsClient *ObsClient, extensions []extensionOptions) error {
+ if uploadPartError != nil {
+ if enableCheckpoint {
+ return uploadPartError
+ }
+ _err := abortTask(ufc.Bucket, ufc.Key, ufc.UploadId, obsClient, extensions)
+ if _err != nil {
+ doLog(LEVEL_WARN, "Failed to abort task [%s].", ufc.UploadId)
+ }
+ return uploadPartError
+ }
+ return nil
+}
+
+func completeParts(ufc *UploadCheckpoint, enableCheckpoint bool, checkpointFilePath string, obsClient *ObsClient, extensions []extensionOptions) (output *CompleteMultipartUploadOutput, err error) {
+ completeInput := &CompleteMultipartUploadInput{}
+ completeInput.Bucket = ufc.Bucket
+ completeInput.Key = ufc.Key
+ completeInput.UploadId = ufc.UploadId
+ parts := make([]Part, 0, len(ufc.UploadParts))
+ for _, uploadPart := range ufc.UploadParts {
+ part := Part{}
+ part.PartNumber = uploadPart.PartNumber
+ part.ETag = uploadPart.Etag
+ parts = append(parts, part)
+ }
+ completeInput.Parts = parts
+ var completeOutput *CompleteMultipartUploadOutput
+ if extensions != nil {
+ completeOutput, err = obsClient.CompleteMultipartUpload(completeInput, extensions...)
+ } else {
+ completeOutput, err = obsClient.CompleteMultipartUpload(completeInput)
+ }
+
+ if err == nil {
+ if enableCheckpoint {
+ _err := os.Remove(checkpointFilePath)
+ if _err != nil {
+ doLog(LEVEL_WARN, "Upload file successfully, but remove checkpoint file failed with error [%v].", _err)
+ }
+ }
+ return completeOutput, err
+ }
+ if !enableCheckpoint {
+ _err := abortTask(ufc.Bucket, ufc.Key, ufc.UploadId, obsClient, extensions)
+ if _err != nil {
+ doLog(LEVEL_WARN, "Failed to abort task [%s].", ufc.UploadId)
+ }
+ }
+ return completeOutput, err
+}
+
+func (obsClient ObsClient) resumeUpload(input *UploadFileInput, extensions []extensionOptions) (output *CompleteMultipartUploadOutput, err error) {
+ uploadFileStat, err := os.Stat(input.UploadFile)
+ if err != nil {
+ doLog(LEVEL_ERROR, fmt.Sprintf("Failed to stat uploadFile with error: [%v].", err))
+ return nil, err
+ }
+ if uploadFileStat.IsDir() {
+ doLog(LEVEL_ERROR, "UploadFile can not be a folder.")
+ return nil, errors.New("uploadFile can not be a folder")
+ }
+
+ ufc := &UploadCheckpoint{}
+
+ var needCheckpoint = true
+ var checkpointFilePath = input.CheckpointFile
+ var enableCheckpoint = input.EnableCheckpoint
+ if enableCheckpoint {
+ needCheckpoint, err = getCheckpointFile(ufc, uploadFileStat, input, &obsClient, extensions)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if needCheckpoint {
+ err = prepareUpload(ufc, uploadFileStat, input, &obsClient, extensions)
+ if err != nil {
+ return nil, err
+ }
+
+ if enableCheckpoint {
+ err = updateCheckpointFile(ufc, checkpointFilePath)
+ if err != nil {
+ doLog(LEVEL_ERROR, "Failed to update checkpoint file with error [%v].", err)
+ _err := abortTask(ufc.Bucket, ufc.Key, ufc.UploadId, &obsClient, extensions)
+ if _err != nil {
+ doLog(LEVEL_WARN, "Failed to abort task [%s].", ufc.UploadId)
+ }
+ return nil, err
+ }
+ }
+ }
+
+ uploadPartError := obsClient.uploadPartConcurrent(ufc, checkpointFilePath, input, extensions)
+ err = handleUploadFileResult(uploadPartError, ufc, enableCheckpoint, &obsClient, extensions)
+ if err != nil {
+ return nil, err
+ }
+
+ completeOutput, err := completeParts(ufc, enableCheckpoint, checkpointFilePath, &obsClient, extensions)
+
+ return completeOutput, err
+}
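+
+// A minimal caller-side sketch for the resumable upload path (hedged: it
+// assumes the exported ObsClient.UploadFile wrapper defined elsewhere in
+// this module; bucket, key and path values are placeholders):
+//
+//	input := &UploadFileInput{}
+//	input.Bucket = "my-bucket"
+//	input.Key = "my-object"
+//	input.UploadFile = "/tmp/big.bin"
+//	input.PartSize = 5 * 1024 * 1024 // bytes per part
+//	input.TaskNum = 4                // concurrent part uploads
+//	input.EnableCheckpoint = true    // resume from CheckpointFile on retry
+//	output, err := obsClient.UploadFile(input)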
+
+func handleUploadTaskResult(result interface{}, ufc *UploadCheckpoint, partNum int, enableCheckpoint bool, checkpointFilePath string, lock *sync.Mutex) (err error) {
+ if uploadPartOutput, ok := result.(*UploadPartOutput); ok {
+ lock.Lock()
+ defer lock.Unlock()
+ ufc.UploadParts[partNum-1].Etag = uploadPartOutput.ETag
+ ufc.UploadParts[partNum-1].IsCompleted = true
+ if enableCheckpoint {
+ _err := updateCheckpointFile(ufc, checkpointFilePath)
+ if _err != nil {
+ doLog(LEVEL_WARN, "Failed to update checkpoint file with error [%v].", _err)
+ }
+ }
+ } else if result != errAbort {
+ if _err, ok := result.(error); ok {
+ err = _err
+ }
+ }
+ return
+}
+
+func (obsClient ObsClient) uploadPartConcurrent(ufc *UploadCheckpoint, checkpointFilePath string, input *UploadFileInput, extensions []extensionOptions) error {
+ pool := NewRoutinePool(input.TaskNum, MAX_PART_NUM)
+ var uploadPartError atomic.Value
+ var errFlag int32
+ var abort int32
+ lock := new(sync.Mutex)
+ for _, uploadPart := range ufc.UploadParts {
+ if atomic.LoadInt32(&abort) == 1 {
+ break
+ }
+ if uploadPart.IsCompleted {
+ continue
+ }
+ task := uploadPartTask{
+ UploadPartInput: UploadPartInput{
+ Bucket: ufc.Bucket,
+ Key: ufc.Key,
+ PartNumber: uploadPart.PartNumber,
+ UploadId: ufc.UploadId,
+ SseHeader: input.SseHeader,
+ SourceFile: input.UploadFile,
+ Offset: uploadPart.Offset,
+ PartSize: uploadPart.PartSize,
+ },
+ obsClient: &obsClient,
+ abort: &abort,
+ extensions: extensions,
+ enableCheckpoint: input.EnableCheckpoint,
+ }
+ pool.ExecuteFunc(func() interface{} {
+ result := task.Run()
+ err := handleUploadTaskResult(result, ufc, task.PartNumber, input.EnableCheckpoint, input.CheckpointFile, lock)
+ if err != nil && atomic.CompareAndSwapInt32(&errFlag, 0, 1) {
+ uploadPartError.Store(err)
+ }
+ return nil
+ })
+ }
+ pool.ShutDown()
+ if err, ok := uploadPartError.Load().(error); ok {
+ return err
+ }
+ return nil
+}
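+
+// Design note: uploadPartConcurrent keeps only the first error (errFlag is
+// claimed via CompareAndSwapInt32 before storing into the atomic.Value),
+// and tasks flip the shared abort flag so parts not yet submitted are
+// skipped, while in-flight tasks drain through pool.ShutDown().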
+
+// ObjectInfo defines download object info
+type ObjectInfo struct {
+ XMLName xml.Name `xml:"ObjectInfo"`
+ LastModified int64 `xml:"LastModified"`
+ Size int64 `xml:"Size"`
+ ETag string `xml:"ETag"`
+}
+
+// TempFileInfo defines temp download file properties
+type TempFileInfo struct {
+ XMLName xml.Name `xml:"TempFileInfo"`
+ TempFileUrl string `xml:"TempFileUrl"`
+ Size int64 `xml:"Size"`
+}
+
+// DownloadPartInfo defines download part properties
+type DownloadPartInfo struct {
+ XMLName xml.Name `xml:"DownloadPart"`
+ PartNumber int64 `xml:"PartNumber"`
+ RangeEnd int64 `xml:"RangeEnd"`
+ Offset int64 `xml:"Offset"`
+ IsCompleted bool `xml:"IsCompleted"`
+}
+
+// DownloadCheckpoint defines download checkpoint file properties
+type DownloadCheckpoint struct {
+ XMLName xml.Name `xml:"DownloadFileCheckpoint"`
+ Bucket string `xml:"Bucket"`
+ Key string `xml:"Key"`
+ VersionId string `xml:"VersionId,omitempty"`
+ DownloadFile string `xml:"FileUrl"`
+ ObjectInfo ObjectInfo `xml:"ObjectInfo"`
+ TempFileInfo TempFileInfo `xml:"TempFileInfo"`
+ DownloadParts []DownloadPartInfo `xml:"DownloadParts>DownloadPart"`
+}
+
+func (dfc *DownloadCheckpoint) isValid(input *DownloadFileInput, output *GetObjectMetadataOutput) bool {
+ if dfc.Bucket != input.Bucket || dfc.Key != input.Key || dfc.VersionId != input.VersionId || dfc.DownloadFile != input.DownloadFile {
+ doLog(LEVEL_INFO, "Checkpoint file is invalid, the bucketName or objectKey or downloadFile was changed. clear the record.")
+ return false
+ }
+ if dfc.ObjectInfo.LastModified != output.LastModified.Unix() || dfc.ObjectInfo.ETag != output.ETag || dfc.ObjectInfo.Size != output.ContentLength {
+ doLog(LEVEL_INFO, "Checkpoint file is invalid, the object info was changed. clear the record.")
+ return false
+ }
+ if dfc.TempFileInfo.Size != output.ContentLength {
+ doLog(LEVEL_INFO, "Checkpoint file is invalid, size was changed. clear the record.")
+ return false
+ }
+ stat, err := os.Stat(dfc.TempFileInfo.TempFileUrl)
+ if err != nil || stat.Size() != dfc.ObjectInfo.Size {
+ doLog(LEVEL_INFO, "Checkpoint file is invalid, the temp download file was changed. clear the record.")
+ return false
+ }
+
+ return true
+}
+
+type downloadPartTask struct {
+ GetObjectInput
+ obsClient *ObsClient
+ extensions []extensionOptions
+ abort *int32
+ partNumber int64
+ tempFileURL string
+ enableCheckpoint bool
+}
+
+func (task *downloadPartTask) Run() interface{} {
+ if atomic.LoadInt32(task.abort) == 1 {
+ return errAbort
+ }
+ getObjectInput := &GetObjectInput{}
+ getObjectInput.GetObjectMetadataInput = task.GetObjectMetadataInput
+ getObjectInput.IfMatch = task.IfMatch
+ getObjectInput.IfNoneMatch = task.IfNoneMatch
+ getObjectInput.IfModifiedSince = task.IfModifiedSince
+ getObjectInput.IfUnmodifiedSince = task.IfUnmodifiedSince
+ getObjectInput.RangeStart = task.RangeStart
+ getObjectInput.RangeEnd = task.RangeEnd
+
+ var output *GetObjectOutput
+ var err error
+ if task.extensions != nil {
+ output, err = task.obsClient.GetObject(getObjectInput, task.extensions...)
+ } else {
+ output, err = task.obsClient.GetObject(getObjectInput)
+ }
+
+ if err == nil {
+ defer func() {
+ errMsg := output.Body.Close()
+ if errMsg != nil {
+ doLog(LEVEL_WARN, "Failed to close response body.")
+ }
+ }()
+ _err := updateDownloadFile(task.tempFileURL, task.RangeStart, output)
+ if _err != nil {
+ if !task.enableCheckpoint {
+ atomic.CompareAndSwapInt32(task.abort, 0, 1)
+ doLog(LEVEL_WARN, "Task is aborted, part number is [%d]", task.partNumber)
+ }
+ return _err
+ }
+ return output
+ } else if obsError, ok := err.(ObsError); ok && obsError.StatusCode >= 400 && obsError.StatusCode < 500 {
+ atomic.CompareAndSwapInt32(task.abort, 0, 1)
+ doLog(LEVEL_WARN, "Task is aborted, part number is [%d]", task.partNumber)
+ }
+ return err
+}
+
+func getObjectInfo(input *DownloadFileInput, obsClient *ObsClient, extensions []extensionOptions) (getObjectmetaOutput *GetObjectMetadataOutput, err error) {
+ if extensions != nil {
+ getObjectmetaOutput, err = obsClient.GetObjectMetadata(&input.GetObjectMetadataInput, extensions...)
+ } else {
+ getObjectmetaOutput, err = obsClient.GetObjectMetadata(&input.GetObjectMetadataInput)
+ }
+
+ return
+}
+
+func getDownloadCheckpointFile(dfc *DownloadCheckpoint, input *DownloadFileInput, output *GetObjectMetadataOutput) (needCheckpoint bool, err error) {
+ checkpointFilePath := input.CheckpointFile
+ checkpointFileStat, err := os.Stat(checkpointFilePath)
+ if err != nil {
+ doLog(LEVEL_DEBUG, fmt.Sprintf("Stat checkpoint file failed with error: [%v].", err))
+ return true, nil
+ }
+ if checkpointFileStat.IsDir() {
+ doLog(LEVEL_ERROR, "Checkpoint file can not be a folder.")
+ return false, errors.New("checkpoint file can not be a folder")
+ }
+ err = loadCheckpointFile(checkpointFilePath, dfc)
+ if err != nil {
+ doLog(LEVEL_WARN, fmt.Sprintf("Load checkpoint file failed with error: [%v].", err))
+ return true, nil
+ } else if !dfc.isValid(input, output) {
+ if dfc.TempFileInfo.TempFileUrl != "" {
+ _err := os.Remove(dfc.TempFileInfo.TempFileUrl)
+ if _err != nil {
+ doLog(LEVEL_WARN, "Failed to remove temp download file with error [%v].", _err)
+ }
+ }
+ _err := os.Remove(checkpointFilePath)
+ if _err != nil {
+ doLog(LEVEL_WARN, "Failed to remove checkpoint file with error [%v].", _err)
+ }
+ } else {
+ return false, nil
+ }
+
+ return true, nil
+}
+
+func sliceObject(objectSize, partSize int64, dfc *DownloadCheckpoint) {
+ cnt := objectSize / partSize
+ if objectSize%partSize > 0 {
+ cnt++
+ }
+
+ if cnt == 0 {
+ downloadPart := DownloadPartInfo{}
+ downloadPart.PartNumber = 1
+ dfc.DownloadParts = []DownloadPartInfo{downloadPart}
+ } else {
+ downloadParts := make([]DownloadPartInfo, 0, cnt)
+ var i int64
+ for i = 0; i < cnt; i++ {
+ downloadPart := DownloadPartInfo{}
+ downloadPart.PartNumber = i + 1
+ downloadPart.Offset = i * partSize
+ downloadPart.RangeEnd = (i+1)*partSize - 1
+ downloadParts = append(downloadParts, downloadPart)
+ }
+ dfc.DownloadParts = downloadParts
+ if value := objectSize % partSize; value > 0 {
+ dfc.DownloadParts[cnt-1].RangeEnd = dfc.ObjectInfo.Size - 1
+ }
+ }
+}
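+
+// Worked example for sliceObject (illustrative numbers): objectSize=10 and
+// partSize=3 give cnt=4 with ranges [0,2], [3,5], [6,8], [9,11], after
+// which the tail fix-up clamps the last RangeEnd to Size-1 = 9.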
+
+func createFile(tempFileURL string, fileSize int64) error {
+ fd, err := syscall.Open(tempFileURL, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
+ if err != nil {
+ doLog(LEVEL_WARN, "Failed to open temp download file [%s].", tempFileURL)
+ return err
+ }
+ defer func() {
+ errMsg := syscall.Close(fd)
+ if errMsg != nil {
+ doLog(LEVEL_WARN, "Failed to close file with error [%v].", errMsg)
+ }
+ }()
+ err = syscall.Ftruncate(fd, fileSize)
+ if err != nil {
+ doLog(LEVEL_WARN, "Failed to create file with error [%v].", err)
+ }
+ return err
+}
+
+func prepareTempFile(tempFileURL string, fileSize int64) error {
+ parentDir := filepath.Dir(tempFileURL)
+ stat, err := os.Stat(parentDir)
+ if err != nil {
+ doLog(LEVEL_DEBUG, "Failed to stat path with error [%v].", err)
+ _err := os.MkdirAll(parentDir, os.ModePerm)
+ if _err != nil {
+ doLog(LEVEL_ERROR, "Failed to make dir with error [%v].", _err)
+ return _err
+ }
+ } else if !stat.IsDir() {
+ doLog(LEVEL_ERROR, "Cannot create folder [%s] due to a same file exists.", parentDir)
+ return fmt.Errorf("cannot create folder [%s] due to a same file exists", parentDir)
+ }
+
+ err = createFile(tempFileURL, fileSize)
+ if err == nil {
+ return nil
+ }
+ fd, err := os.OpenFile(tempFileURL, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
+ if err != nil {
+ doLog(LEVEL_ERROR, "Failed to open temp download file [%s].", tempFileURL)
+ return err
+ }
+ defer func() {
+ errMsg := fd.Close()
+ if errMsg != nil {
+ doLog(LEVEL_WARN, "Failed to close file with error [%v].", errMsg)
+ }
+ }()
+ if fileSize > 0 {
+ _, err = fd.WriteAt([]byte("a"), fileSize-1)
+ if err != nil {
+ doLog(LEVEL_ERROR, "Failed to create temp download file with error [%v].", err)
+ return err
+ }
+ }
+
+ return nil
+}
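+
+// Note: prepareTempFile first tries createFile, whose syscall.Ftruncate
+// extends the file to the target size without writing data (typically a
+// sparse file); if that fails, it falls back to os.OpenFile plus a single
+// one-byte WriteAt at fileSize-1, which forces the same length.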
+
+func handleDownloadFileResult(tempFileURL string, enableCheckpoint bool, downloadFileError error) error {
+ if downloadFileError != nil {
+ if !enableCheckpoint {
+ _err := os.Remove(tempFileURL)
+ if _err != nil {
+ doLog(LEVEL_WARN, "Failed to remove temp download file with error [%v].", _err)
+ }
+ }
+ return downloadFileError
+ }
+ return nil
+}
+
+func (obsClient ObsClient) resumeDownload(input *DownloadFileInput, extensions []extensionOptions) (output *GetObjectMetadataOutput, err error) {
+ getObjectmetaOutput, err := getObjectInfo(input, &obsClient, extensions)
+ if err != nil {
+ return nil, err
+ }
+
+ objectSize := getObjectmetaOutput.ContentLength
+ partSize := input.PartSize
+ dfc := &DownloadCheckpoint{}
+
+ var needCheckpoint = true
+ var checkpointFilePath = input.CheckpointFile
+ var enableCheckpoint = input.EnableCheckpoint
+ if enableCheckpoint {
+ needCheckpoint, err = getDownloadCheckpointFile(dfc, input, getObjectmetaOutput)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if needCheckpoint {
+ dfc.Bucket = input.Bucket
+ dfc.Key = input.Key
+ dfc.VersionId = input.VersionId
+ dfc.DownloadFile = input.DownloadFile
+ dfc.ObjectInfo = ObjectInfo{}
+ dfc.ObjectInfo.LastModified = getObjectmetaOutput.LastModified.Unix()
+ dfc.ObjectInfo.Size = getObjectmetaOutput.ContentLength
+ dfc.ObjectInfo.ETag = getObjectmetaOutput.ETag
+ dfc.TempFileInfo = TempFileInfo{}
+ dfc.TempFileInfo.TempFileUrl = input.DownloadFile + ".tmp"
+ dfc.TempFileInfo.Size = getObjectmetaOutput.ContentLength
+
+ sliceObject(objectSize, partSize, dfc)
+ _err := prepareTempFile(dfc.TempFileInfo.TempFileUrl, dfc.TempFileInfo.Size)
+ if _err != nil {
+ return nil, _err
+ }
+
+ if enableCheckpoint {
+ _err := updateCheckpointFile(dfc, checkpointFilePath)
+ if _err != nil {
+ doLog(LEVEL_ERROR, "Failed to update checkpoint file with error [%v].", _err)
+ _errMsg := os.Remove(dfc.TempFileInfo.TempFileUrl)
+ if _errMsg != nil {
+ doLog(LEVEL_WARN, "Failed to remove temp download file with error [%v].", _errMsg)
+ }
+ return nil, _err
+ }
+ }
+ }
+
+ downloadFileError := obsClient.downloadFileConcurrent(input, dfc, extensions)
+ err = handleDownloadFileResult(dfc.TempFileInfo.TempFileUrl, enableCheckpoint, downloadFileError)
+ if err != nil {
+ return nil, err
+ }
+
+ err = os.Rename(dfc.TempFileInfo.TempFileUrl, input.DownloadFile)
+ if err != nil {
+ doLog(LEVEL_ERROR, "Failed to rename temp download file [%s] to download file [%s] with error [%v].", dfc.TempFileInfo.TempFileUrl, input.DownloadFile, err)
+ return nil, err
+ }
+ if enableCheckpoint {
+ err = os.Remove(checkpointFilePath)
+ if err != nil {
+ doLog(LEVEL_WARN, "Download file successfully, but remove checkpoint file failed with error [%v].", err)
+ }
+ }
+
+ return getObjectmetaOutput, nil
+}
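+
+// A minimal caller-side sketch for the resumable download path (hedged: it
+// assumes the exported ObsClient.DownloadFile wrapper defined elsewhere in
+// this module; bucket, key and path values are placeholders):
+//
+//	input := &DownloadFileInput{}
+//	input.Bucket = "my-bucket"
+//	input.Key = "my-object"
+//	input.DownloadFile = "/tmp/big.bin" // temp data goes to "/tmp/big.bin.tmp"
+//	input.PartSize = 5 * 1024 * 1024
+//	input.TaskNum = 4
+//	input.EnableCheckpoint = true
+//	meta, err := obsClient.DownloadFile(input)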
+
+func updateDownloadFile(filePath string, rangeStart int64, output *GetObjectOutput) error {
+ fd, err := os.OpenFile(filePath, os.O_WRONLY, 0666)
+ if err != nil {
+ doLog(LEVEL_ERROR, "Failed to open file [%s].", filePath)
+ return err
+ }
+ defer func() {
+ errMsg := fd.Close()
+ if errMsg != nil {
+ doLog(LEVEL_WARN, "Failed to close file with error [%v].", errMsg)
+ }
+ }()
+ _, err = fd.Seek(rangeStart, io.SeekStart)
+ if err != nil {
+ doLog(LEVEL_ERROR, "Failed to seek file with error [%v].", err)
+ return err
+ }
+ fileWriter := bufio.NewWriterSize(fd, 65536)
+ part := make([]byte, 8192)
+ var readErr error
+ var readCount int
+ for {
+ readCount, readErr = output.Body.Read(part)
+ if readCount > 0 {
+ wcnt, werr := fileWriter.Write(part[0:readCount])
+ if werr != nil {
+ doLog(LEVEL_ERROR, "Failed to write to file with error [%v].", werr)
+ return werr
+ }
+ if wcnt != readCount {
+ doLog(LEVEL_ERROR, "Failed to write to file [%s], expect: [%d], actual: [%d]", filePath, readCount, wcnt)
+ return fmt.Errorf("Failed to write to file [%s], expect: [%d], actual: [%d]", filePath, readCount, wcnt)
+ }
+ }
+ if readErr != nil {
+ if readErr != io.EOF {
+ doLog(LEVEL_ERROR, "Failed to read response body with error [%v].", readErr)
+ return readErr
+ }
+ break
+ }
+ }
+ err = fileWriter.Flush()
+ if err != nil {
+ doLog(LEVEL_ERROR, "Failed to flush file with error [%v].", err)
+ return err
+ }
+ return nil
+}
+
+func handleDownloadTaskResult(result interface{}, dfc *DownloadCheckpoint, partNum int64, enableCheckpoint bool, checkpointFile string, lock *sync.Mutex) (err error) {
+ if _, ok := result.(*GetObjectOutput); ok {
+ lock.Lock()
+ defer lock.Unlock()
+ dfc.DownloadParts[partNum-1].IsCompleted = true
+ if enableCheckpoint {
+ _err := updateCheckpointFile(dfc, checkpointFile)
+ if _err != nil {
+ doLog(LEVEL_WARN, "Failed to update checkpoint file with error [%v].", _err)
+ }
+ }
+ } else if result != errAbort {
+ if _err, ok := result.(error); ok {
+ err = _err
+ }
+ }
+ return
+}
+
+func (obsClient ObsClient) downloadFileConcurrent(input *DownloadFileInput, dfc *DownloadCheckpoint, extensions []extensionOptions) error {
+ pool := NewRoutinePool(input.TaskNum, MAX_PART_NUM)
+ var downloadPartError atomic.Value
+ var errFlag int32
+ var abort int32
+ lock := new(sync.Mutex)
+ for _, downloadPart := range dfc.DownloadParts {
+ if atomic.LoadInt32(&abort) == 1 {
+ break
+ }
+ if downloadPart.IsCompleted {
+ continue
+ }
+ task := downloadPartTask{
+ GetObjectInput: GetObjectInput{
+ GetObjectMetadataInput: input.GetObjectMetadataInput,
+ IfMatch: input.IfMatch,
+ IfNoneMatch: input.IfNoneMatch,
+ IfUnmodifiedSince: input.IfUnmodifiedSince,
+ IfModifiedSince: input.IfModifiedSince,
+ RangeStart: downloadPart.Offset,
+ RangeEnd: downloadPart.RangeEnd,
+ },
+ obsClient: &obsClient,
+ extensions: extensions,
+ abort: &abort,
+ partNumber: downloadPart.PartNumber,
+ tempFileURL: dfc.TempFileInfo.TempFileUrl,
+ enableCheckpoint: input.EnableCheckpoint,
+ }
+ pool.ExecuteFunc(func() interface{} {
+ result := task.Run()
+ err := handleDownloadTaskResult(result, dfc, task.partNumber, input.EnableCheckpoint, input.CheckpointFile, lock)
+ if err != nil && atomic.CompareAndSwapInt32(&errFlag, 0, 1) {
+ downloadPartError.Store(err)
+ }
+ return nil
+ })
+ }
+ pool.ShutDown()
+ if err, ok := downloadPartError.Load().(error); ok {
+ return err
+ }
+
+ return nil
+}
diff --git a/modules/obs/util.go b/modules/obs/util.go
new file mode 100755
index 000000000..f3378dff9
--- /dev/null
+++ b/modules/obs/util.go
@@ -0,0 +1,536 @@
+// Copyright 2019 Huawei Technologies Co.,Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+//nolint:golint,unused
+package obs
+
+import (
+ "crypto/hmac"
+ "crypto/md5"
+ "crypto/sha1"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/json"
+ "encoding/xml"
+ "fmt"
+ "net/url"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var regex = regexp.MustCompile("^[\u4e00-\u9fa5]$")
+var ipRegex = regexp.MustCompile("^((2[0-4]\\d|25[0-5]|[01]?\\d\\d?)\\.){3}(2[0-4]\\d|25[0-5]|[01]?\\d\\d?)$")
+var v4AuthRegex = regexp.MustCompile("Credential=(.+?),SignedHeaders=(.+?),Signature=.+")
+var regionRegex = regexp.MustCompile(".+/\\d+/(.+?)/.+")
+
+// StringContains replaces subStr in src with subTranscoding and returns the new string
+func StringContains(src string, subStr string, subTranscoding string) string {
+ return strings.Replace(src, subStr, subTranscoding, -1)
+}
+
+// XmlTranscoding replaces special characters with their escaped form
+func XmlTranscoding(src string) string {
+ srcTmp := StringContains(src, "&", "&amp;")
+ srcTmp = StringContains(srcTmp, "<", "&lt;")
+ srcTmp = StringContains(srcTmp, ">", "&gt;")
+ srcTmp = StringContains(srcTmp, "'", "&apos;")
+ srcTmp = StringContains(srcTmp, "\"", "&quot;")
+ return srcTmp
+}
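+
+// Example: XmlTranscoding(`a<b&"c"`) returns `a&lt;b&amp;&quot;c&quot;`;
+// "&" is replaced first, so already-escaped input would be double-escaped.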
+
+// StringToInt converts string value to int value with default value
+func StringToInt(value string, def int) int {
+ ret, err := strconv.Atoi(value)
+ if err != nil {
+ ret = def
+ }
+ return ret
+}
+
+// StringToInt64 converts string value to int64 value with default value
+func StringToInt64(value string, def int64) int64 {
+ ret, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ ret = def
+ }
+ return ret
+}
+
+// IntToString converts int value to string value
+func IntToString(value int) string {
+ return strconv.Itoa(value)
+}
+
+// Int64ToString converts int64 value to string value
+func Int64ToString(value int64) string {
+ return strconv.FormatInt(value, 10)
+}
+
+// GetCurrentTimestamp gets unix time in milliseconds
+func GetCurrentTimestamp() int64 {
+ return time.Now().UnixNano() / 1000000
+}
+
+// FormatUtcNow gets a textual representation of the UTC format time value
+func FormatUtcNow(format string) string {
+ return time.Now().UTC().Format(format)
+}
+
+// FormatUtcToRfc1123 gets a textual representation of the RFC1123 format time value
+func FormatUtcToRfc1123(t time.Time) string {
+ ret := t.UTC().Format(time.RFC1123)
+ return ret[:strings.LastIndex(ret, "UTC")] + "GMT"
+}
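+
+// Example: for 2019-01-02 03:04:05 UTC this returns
+// "Wed, 02 Jan 2019 03:04:05 GMT", i.e. RFC1123 with the trailing "UTC"
+// swapped for the "GMT" spelling that signed Date headers expect.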
+
+// Md5 gets the md5 value of input
+func Md5(value []byte) []byte {
+ m := md5.New()
+ _, err := m.Write(value)
+ if err != nil {
+ doLog(LEVEL_WARN, "MD5 failed to write")
+ }
+ return m.Sum(nil)
+}
+
+// HmacSha1 gets the hmac sha1 value of input
+func HmacSha1(key, value []byte) []byte {
+ mac := hmac.New(sha1.New, key)
+ _, err := mac.Write(value)
+ if err != nil {
+ doLog(LEVEL_WARN, "HmacSha1 failed to write")
+ }
+ return mac.Sum(nil)
+}
+
+// HmacSha256 gets the hmac sha256 value of input
+func HmacSha256(key, value []byte) []byte {
+ mac := hmac.New(sha256.New, key)
+ _, err := mac.Write(value)
+ if err != nil {
+ doLog(LEVEL_WARN, "HmacSha256 failed to write")
+ }
+ return mac.Sum(nil)
+}
+
+// Base64Encode wrapper of base64.StdEncoding.EncodeToString
+func Base64Encode(value []byte) string {
+ return base64.StdEncoding.EncodeToString(value)
+}
+
+// Base64Decode wrapper of base64.StdEncoding.DecodeString
+func Base64Decode(value string) ([]byte, error) {
+ return base64.StdEncoding.DecodeString(value)
+}
+
+// HexMd5 returns the md5 value of input in hexadecimal format
+func HexMd5(value []byte) string {
+ return Hex(Md5(value))
+}
+
+// Base64Md5 returns the md5 value of input with Base64Encode
+func Base64Md5(value []byte) string {
+ return Base64Encode(Md5(value))
+}
+
+// Sha256Hash returns sha256 checksum
+func Sha256Hash(value []byte) []byte {
+ hash := sha256.New()
+ _, err := hash.Write(value)
+ if err != nil {
+ doLog(LEVEL_WARN, "Sha256Hash failed to write")
+ }
+ return hash.Sum(nil)
+}
+
+// ParseXml wrapper of xml.Unmarshal
+func ParseXml(value []byte, result interface{}) error {
+ if len(value) == 0 {
+ return nil
+ }
+ return xml.Unmarshal(value, result)
+}
+
+// parseJSON wrapper of json.Unmarshal
+func parseJSON(value []byte, result interface{}) error {
+ if len(value) == 0 {
+ return nil
+ }
+ return json.Unmarshal(value, result)
+}
+
+// TransToXml wrapper of xml.Marshal
+func TransToXml(value interface{}) ([]byte, error) {
+ if value == nil {
+ return []byte{}, nil
+ }
+ return xml.Marshal(value)
+}
+
+// Hex wrapper of hex.EncodeToString
+func Hex(value []byte) string {
+ return hex.EncodeToString(value)
+}
+
+// HexSha256 returns the Sha256Hash value of input in hexadecimal format
+func HexSha256(value []byte) string {
+ return Hex(Sha256Hash(value))
+}
+
+// UrlDecode wrapper of url.QueryUnescape
+func UrlDecode(value string) (string, error) {
+ ret, err := url.QueryUnescape(value)
+ if err == nil {
+ return ret, nil
+ }
+ return "", err
+}
+
+// UrlDecodeWithoutError wrapper of UrlDecode
+func UrlDecodeWithoutError(value string) string {
+ ret, err := UrlDecode(value)
+ if err == nil {
+ return ret
+ }
+ if isErrorLogEnabled() {
+ doLog(LEVEL_ERROR, "Url decode error")
+ }
+ return ""
+}
+
+// IsIP checks whether the value is a valid IPv4 address
+func IsIP(value string) bool {
+ return ipRegex.MatchString(value)
+}
+
+// UrlEncode encodes the input value; when chineseOnly is true, only Chinese characters are escaped
+func UrlEncode(value string, chineseOnly bool) string {
+ if chineseOnly {
+ values := make([]string, 0, len(value))
+ for _, val := range value {
+ _value := string(val)
+ if regex.MatchString(_value) {
+ _value = url.QueryEscape(_value)
+ }
+ values = append(values, _value)
+ }
+ return strings.Join(values, "")
+ }
+ return url.QueryEscape(value)
+}
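+
+// Examples: UrlEncode("a b", false) returns "a+b" (plain url.QueryEscape),
+// while UrlEncode("a b中", true) escapes only the Chinese rune and returns
+// "a b%E4%B8%AD".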
+
+func copyHeaders(m map[string][]string) (ret map[string][]string) {
+ if m != nil {
+ ret = make(map[string][]string, len(m))
+ for key, values := range m {
+ _values := make([]string, 0, len(values))
+ for _, value := range values {
+ _values = append(_values, value)
+ }
+ ret[strings.ToLower(key)] = _values
+ }
+ } else {
+ ret = make(map[string][]string)
+ }
+
+ return
+}
+
+func parseHeaders(headers map[string][]string) (signature string, region string, signedHeaders string) {
+ signature = "v2"
+ if receivedAuthorization, ok := headers[strings.ToLower(HEADER_AUTH_CAMEL)]; ok && len(receivedAuthorization) > 0 {
+ if strings.HasPrefix(receivedAuthorization[0], V4_HASH_PREFIX) {
+ signature = "v4"
+ matches := v4AuthRegex.FindStringSubmatch(receivedAuthorization[0])
+ if len(matches) >= 3 {
+ region = matches[1]
+ regions := regionRegex.FindStringSubmatch(region)
+ if len(regions) >= 2 {
+ region = regions[1]
+ }
+ signedHeaders = matches[2]
+ }
+
+ } else if strings.HasPrefix(receivedAuthorization[0], V2_HASH_PREFIX) {
+ signature = "v2"
+ }
+ }
+ return
+}
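+
+// Example (assuming V4_HASH_PREFIX is the usual "AWS4-HMAC-SHA256" tag):
+// an authorization header of
+// "AWS4-HMAC-SHA256 Credential=AK/20190102/cn-north-1/s3/aws4_request,SignedHeaders=host;x-amz-date,Signature=..."
+// yields signature "v4", region "cn-north-1" (extracted via regionRegex)
+// and signedHeaders "host;x-amz-date"; any other prefix is treated as v2.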
+
+func getTemporaryKeys() []string {
+ return []string{
+ "Signature",
+ "signature",
+ "X-Amz-Signature",
+ "x-amz-signature",
+ }
+}
+
+func getIsObs(isTemporary bool, querys []string, headers map[string][]string) bool {
+ isObs := true
+ if isTemporary {
+ for _, value := range querys {
+ keyPrefix := strings.ToLower(value)
+ if strings.HasPrefix(keyPrefix, HEADER_PREFIX) {
+ isObs = false
+ } else if strings.HasPrefix(value, HEADER_ACCESSS_KEY_AMZ) {
+ isObs = false
+ }
+ }
+ } else {
+ for key := range headers {
+ keyPrefix := strings.ToLower(key)
+ if strings.HasPrefix(keyPrefix, HEADER_PREFIX) {
+ isObs = false
+ break
+ }
+ }
+ }
+ return isObs
+}
+
+func isPathStyle(headers map[string][]string, bucketName string) bool {
+ if receivedHost, ok := headers[HEADER_HOST]; ok && len(receivedHost) > 0 && !strings.HasPrefix(receivedHost[0], bucketName+".") {
+ return true
+ }
+ return false
+}
+
+// GetV2Authorization v2 Authorization
+func GetV2Authorization(ak, sk, method, bucketName, objectKey, queryURL string, headers map[string][]string) (ret map[string]string) {
+
+ if strings.HasPrefix(queryURL, "?") {
+ queryURL = queryURL[1:]
+ }
+
+ method = strings.ToUpper(method)
+
+ querys := strings.Split(queryURL, "&")
+ querysResult := make([]string, 0)
+ for _, value := range querys {
+ if value != "=" && len(value) != 0 {
+ querysResult = append(querysResult, value)
+ }
+ }
+ params := make(map[string]string)
+
+ for _, value := range querysResult {
+ kv := strings.Split(value, "=")
+ length := len(kv)
+ if length == 1 {
+ key := UrlDecodeWithoutError(kv[0])
+ params[key] = ""
+ } else if length >= 2 {
+ key := UrlDecodeWithoutError(kv[0])
+ vals := make([]string, 0, length-1)
+ for i := 1; i < length; i++ {
+ val := UrlDecodeWithoutError(kv[i])
+ vals = append(vals, val)
+ }
+ params[key] = strings.Join(vals, "=")
+ }
+ }
+ headers = copyHeaders(headers)
+ pathStyle := isPathStyle(headers, bucketName)
+ conf := &config{securityProvider: &securityProvider{ak: ak, sk: sk},
+ urlHolder: &urlHolder{scheme: "https", host: "dummy", port: 443},
+ pathStyle: pathStyle}
+ conf.signature = SignatureObs
+ _, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false)
+ ret = v2Auth(ak, sk, method, canonicalizedURL, headers, true)
+ v2HashPrefix := OBS_HASH_PREFIX
+ ret[HEADER_AUTH_CAMEL] = fmt.Sprintf("%s %s:%s", v2HashPrefix, ak, ret["Signature"])
+ return
+}
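+
+// A minimal usage sketch (hedged: values are placeholders; per the code
+// above this always signs in OBS style, so the returned map carries
+// "Signature" plus an Authorization value of the form "<prefix> ak:sig"):
+//
+//	headers := map[string][]string{"Host": {"bucket.example.com"}}
+//	ret := GetV2Authorization("ak", "sk", "GET", "bucket", "key", "", headers)
+//	auth := ret[HEADER_AUTH_CAMEL]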
+
+// GetAuthorization Authorization
+func GetAuthorization(ak, sk, method, bucketName, objectKey, queryURL string, headers map[string][]string) (ret map[string]string) {
+
+ if strings.HasPrefix(queryURL, "?") {
+ queryURL = queryURL[1:]
+ }
+
+ method = strings.ToUpper(method)
+
+ querys := strings.Split(queryURL, "&")
+ querysResult := make([]string, 0)
+ for _, value := range querys {
+ if value != "=" && len(value) != 0 {
+ querysResult = append(querysResult, value)
+ }
+ }
+ params := make(map[string]string)
+
+ for _, value := range querysResult {
+ kv := strings.Split(value, "=")
+ length := len(kv)
+ if length == 1 {
+ key := UrlDecodeWithoutError(kv[0])
+ params[key] = ""
+ } else if length >= 2 {
+ key := UrlDecodeWithoutError(kv[0])
+ vals := make([]string, 0, length-1)
+ for i := 1; i < length; i++ {
+ val := UrlDecodeWithoutError(kv[i])
+ vals = append(vals, val)
+ }
+ params[key] = strings.Join(vals, "=")
+ }
+ }
+ isTemporary := false
+ signature := "v2"
+ temporaryKeys := getTemporaryKeys()
+ for _, key := range temporaryKeys {
+ if _, ok := params[key]; ok {
+ isTemporary = true
+ if strings.ToLower(key) == "signature" {
+ signature = "v2"
+ } else if strings.ToLower(key) == "x-amz-signature" {
+ signature = "v4"
+ }
+ break
+ }
+ }
+ isObs := getIsObs(isTemporary, querysResult, headers)
+ headers = copyHeaders(headers)
+ pathStyle := isPathStyle(headers, bucketName)
+ conf := &config{securityProvider: &securityProvider{ak: ak, sk: sk},
+ urlHolder: &urlHolder{scheme: "https", host: "dummy", port: 443},
+ pathStyle: pathStyle}
+
+ if isTemporary {
+ return getTemporaryAuthorization(ak, sk, method, bucketName, objectKey, signature, conf, params, headers, isObs)
+ }
+ signature, region, signedHeaders := parseHeaders(headers)
+ if signature == "v4" {
+ conf.signature = SignatureV4
+ requestURL, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false)
+ parsedRequestURL, _err := url.Parse(requestURL)
+ if _err != nil {
+ doLog(LEVEL_WARN, "Failed to parse requestURL")
+ return nil
+ }
+ headerKeys := strings.Split(signedHeaders, ";")
+ _headers := make(map[string][]string, len(headerKeys))
+ for _, headerKey := range headerKeys {
+ _headers[headerKey] = headers[headerKey]
+ }
+ ret = v4Auth(ak, sk, region, method, canonicalizedURL, parsedRequestURL.RawQuery, _headers)
+ ret[HEADER_AUTH_CAMEL] = fmt.Sprintf("%s Credential=%s,SignedHeaders=%s,Signature=%s", V4_HASH_PREFIX, ret["Credential"], ret["SignedHeaders"], ret["Signature"])
+ } else if signature == "v2" {
+ if isObs {
+ conf.signature = SignatureObs
+ } else {
+ conf.signature = SignatureV2
+ }
+ _, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false)
+ ret = v2Auth(ak, sk, method, canonicalizedURL, headers, isObs)
+ v2HashPrefix := V2_HASH_PREFIX
+ if isObs {
+ v2HashPrefix = OBS_HASH_PREFIX
+ }
+ ret[HEADER_AUTH_CAMEL] = fmt.Sprintf("%s %s:%s", v2HashPrefix, ak, ret["Signature"])
+ }
+ return
+
+}
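+
+// Dispatch summary for GetAuthorization, as implemented above: a
+// Signature/X-Amz-Signature query parameter routes to the temporary-URL
+// path; otherwise the received Authorization header decides between v4
+// (rebuilt Credential/SignedHeaders/Signature header) and v2/OBS
+// ("<prefix> ak:signature"); on a URL parse failure it returns nil.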
+
+func getTemporaryAuthorization(ak, sk, method, bucketName, objectKey, signature string, conf *config, params map[string]string,
+ headers map[string][]string, isObs bool) (ret map[string]string) {
+
+ if signature == "v4" {
+ conf.signature = SignatureV4
+
+ longDate, ok := params[PARAM_DATE_AMZ_CAMEL]
+ if !ok {
+ longDate = params[HEADER_DATE_AMZ]
+ }
+ shortDate := longDate[:8]
+
+ credential, ok := params[PARAM_CREDENTIAL_AMZ_CAMEL]
+ if !ok {
+ credential = params[strings.ToLower(PARAM_CREDENTIAL_AMZ_CAMEL)]
+ }
+
+ _credential := UrlDecodeWithoutError(credential)
+
+ regions := regionRegex.FindStringSubmatch(_credential)
+ var region string
+ if len(regions) >= 2 {
+ region = regions[1]
+ }
+
+ _, scope := getCredential(ak, region, shortDate)
+
+ expires, ok := params[PARAM_EXPIRES_AMZ_CAMEL]
+ if !ok {
+ expires = params[strings.ToLower(PARAM_EXPIRES_AMZ_CAMEL)]
+ }
+
+ signedHeaders, ok := params[PARAM_SIGNEDHEADERS_AMZ_CAMEL]
+ if !ok {
+ signedHeaders = params[strings.ToLower(PARAM_SIGNEDHEADERS_AMZ_CAMEL)]
+ }
+
+ algorithm, ok := params[PARAM_ALGORITHM_AMZ_CAMEL]
+ if !ok {
+ algorithm = params[strings.ToLower(PARAM_ALGORITHM_AMZ_CAMEL)]
+ }
+
+ if _, ok := params[PARAM_SIGNATURE_AMZ_CAMEL]; ok {
+ delete(params, PARAM_SIGNATURE_AMZ_CAMEL)
+ } else if _, ok := params[strings.ToLower(PARAM_SIGNATURE_AMZ_CAMEL)]; ok {
+ delete(params, strings.ToLower(PARAM_SIGNATURE_AMZ_CAMEL))
+ }
+
+ ret = make(map[string]string, 6)
+ ret[PARAM_ALGORITHM_AMZ_CAMEL] = algorithm
+ ret[PARAM_CREDENTIAL_AMZ_CAMEL] = credential
+ ret[PARAM_DATE_AMZ_CAMEL] = longDate
+ ret[PARAM_EXPIRES_AMZ_CAMEL] = expires
+ ret[PARAM_SIGNEDHEADERS_AMZ_CAMEL] = signedHeaders
+
+ requestURL, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false)
+ parsedRequestURL, _err := url.Parse(requestURL)
+ if _err != nil {
+ doLog(LEVEL_WARN, "Failed to parse requestUrl")
+ return nil
+ }
+ stringToSign := getV4StringToSign(method, canonicalizedURL, parsedRequestURL.RawQuery, scope, longDate, UNSIGNED_PAYLOAD, strings.Split(signedHeaders, ";"), headers)
+ ret[PARAM_SIGNATURE_AMZ_CAMEL] = UrlEncode(getSignature(stringToSign, sk, region, shortDate), false)
+ } else if signature == "v2" {
+ if isObs {
+ conf.signature = SignatureObs
+ } else {
+ conf.signature = SignatureV2
+ }
+ _, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false)
+ expires, ok := params["Expires"]
+ if !ok {
+ expires = params["expires"]
+ }
+ headers[HEADER_DATE_CAMEL] = []string{expires}
+ stringToSign := getV2StringToSign(method, canonicalizedURL, headers, isObs)
+ ret = make(map[string]string, 3)
+ ret["Signature"] = UrlEncode(Base64Encode(HmacSha1([]byte(sk), []byte(stringToSign))), false)
+ ret["AWSAccessKeyId"] = UrlEncode(ak, false)
+ ret["Expires"] = UrlEncode(expires, false)
+ }
+
+ return
+}
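+
+// For the v2 branch above, the returned values are the classic pre-signed
+// query triple, e.g. (placeholder values)
+//
+//	Signature=...&AWSAccessKeyId=ak&Expires=1546398245
+//
+// where the Expires value stands in for the Date line of the string to
+// sign; the v4 branch instead re-emits the X-Amz-* parameters with a
+// freshly computed X-Amz-Signature.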