38 Commits

Author SHA1 Message Date
  ychao 063d3a4588 Merge pull request 'dataset-urchin' (#7154) from dataset-urchin into V20251211 1 week ago
  chenyifan01 b5fa3db6bf Merge pull request 'V20251211' (#7153) from V20251211 into dataset-urchin 1 week ago
  zhoupzh 17ecc73fb7 fix issue 1 week ago
  chenyifan01 787be07a05 fix bug 1 week ago
  chenyifan01 0f981fdb78 fix bug 1 week ago
  chenyifan01 aee234b43a Merge branch 'dataset-urchin' of https://openi.pcl.ac.cn/OpenI/aiforge into dataset-urchin 1 week ago
  chenyifan01 7ca5f3c1be fix bug 1 week ago
  chenyifan01 fd07a766b2 Merge pull request 'V20251127' (#7144) from V20251127 into dataset-urchin 1 week ago
  chenyifan01 51c189bde3 fix bug 1 week ago
  chenyifan01 91b2b66f18 fix bug 1 week ago
  chenyifan01 b3928bb03a 添加删除海胆缓存逻辑 2 weeks ago
  chenyifan01 488d2cc08c Merge remote-tracking branch 'origin/V20251127' into dataset-urchin 2 weeks ago
  chenyifan01 8e9aa6225f Merge remote-tracking branch 'origin/V20251127' into dataset-urchin 2 weeks ago
  chenyifan01 bf40b9ec64 fix bug 2 weeks ago
  chenyifan01 f73c2a765c 修改日志打印 2 weeks ago
  chenyifan01 46e4e1d44f Merge branch 'dataset-urchin' of https://openi.pcl.ac.cn/OpenI/aiforge into dataset-urchin 3 weeks ago
  chenyifan01 6b35d46fec add log 3 weeks ago
  chenzh 2bd08d7cb6 fix aimodel storage type settings 4 weeks ago
  chenzh ab49056612 海胆2.0 模型部分修改 4 weeks ago
  Gitea 31d761e83c Merge branch 'V20251113' into dataset-urchin 4 weeks ago
  chenyifan01 101ce7f6a9 海胆2.0 1 month ago
  chenyifan01 e193832915 海胆2.0 1 month ago
  chenyifan01 8ec7fe5ac6 海胆2.0 1 month ago
  chenyifan01 a04f868990 海胆2.0 1 month ago
  chenyifan01 de3ef7b137 海胆2.0 1 month ago
  chenyifan01 e70b6e73b9 海胆2.0 1 month ago
  chenyifan01 6ff5cfdf47 海胆2.0 2 months ago
  chenyifan01 49d37f6c7c Merge pull request 'V20251016' (#7003) from V20251016 into dataset-urchin 2 months ago
  chenyifan01 9ac8b1fbf5 海胆2.0 2 months ago
  chenyifan01 8552926c56 海胆2.0 2 months ago
  chenyifan01 5ee714c735 海胆2.0 2 months ago
  chenyifan01 2c9198f226 海胆2.0 2 months ago
  chenyifan01 28268fcae4 海胆2.0 2 months ago
  chenyifan01 d000b3e011 海胆2.0 2 months ago
  chenyifan01 eb721b22b1 海胆2.0 2 months ago
  testdasdsadasdas0 f231405100 海胆2.0 2 months ago
  testdasdsadasdas0 c92545a3f9 海胆2.0 2 months ago
  testdasdsadasdas0 401345697e 海胆2.0 2 months ago
84 changed files with 18007 additions and 1115 deletions
Split View
  1. +3
    -1
      entity/container.go
  2. +3
    -2
      entity/storage.go
  3. +3
    -2
      go.mod
  4. +2
    -0
      go.sum
  5. +4
    -0
      manager/client/grampus/grampus.go
  6. +2
    -2
      models/ai_model_manage.go
  7. +1
    -0
      models/cloudbrain.go
  8. +14
    -2
      models/dataset_registry.go
  9. +14
    -2
      models/hf_model.go
  10. +18
    -0
      modules/setting/setting.go
  11. +1
    -0
      modules/storage/obs.go
  12. +7
    -4
      modules/storage/storage.go
  13. +579
    -0
      modules/urchin_v2/client.go
  14. +1637
    -0
      modules/urchin_v2/client/urchin_client.go
  15. +12
    -0
      modules/urchin_v2/common/error.go
  16. +116
    -0
      modules/urchin_v2/common/http.go
  17. +1190
    -0
      modules/urchin_v2/module/urchin.go
  18. +2
    -1
      options/locale/locale_en-US.ini
  19. +1
    -0
      options/locale/locale_zh-CN.ini
  20. +12
    -11
      routers/api/v1/aimodel/aimodel.go
  21. +6
    -6
      routers/api/v1/dataset/dataset.go
  22. +3
    -2
      routers/response/response_list.go
  23. +1
    -0
      services/ai_task_service/cluster/c2net.go
  24. +1
    -0
      services/ai_task_service/container_builder/dataset_builder.go
  25. +63
    -62
      services/ai_task_service/container_builder/pre_model_builder.go
  26. +35
    -1
      services/ai_task_service/storage_helper/client.go
  27. +49
    -4
      services/ai_task_service/storage_helper/minio.go
  28. +46
    -0
      services/ai_task_service/storage_helper/obs.go
  29. +545
    -0
      services/ai_task_service/storage_helper/urchin_v2.go
  30. +39
    -23
      services/subject_service/aimodel_service.go
  31. +7
    -1
      services/subject_service/aimodel_uploader_helper.go
  32. +27
    -40
      services/subject_service/dataset_service.go
  33. +7
    -1
      services/subject_service/dataset_uploader_helper.go
  34. +6
    -10
      services/subject_service/upload_service.go
  35. +1
    -0
      vendor/gitea.com/macaron/csrf/csrf.go
  36. +1
    -0
      vendor/gitea.com/macaron/csrf/xsrf.go
  37. +0
    -420
      vendor/github.com/golang/mock/gomock/call.go
  38. +0
    -108
      vendor/github.com/golang/mock/gomock/callset.go
  39. +0
    -264
      vendor/github.com/golang/mock/gomock/controller.go
  40. +0
    -141
      vendor/github.com/golang/mock/gomock/matchers.go
  41. +201
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/LICENSE
  42. +347
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/auth.go
  43. +55
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/authV2.go
  44. +136
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/authV4.go
  45. +49
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/callback.go
  46. +68
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_base.go
  47. +869
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_bucket.go
  48. +571
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_object.go
  49. +49
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_other.go
  50. +257
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_part.go
  51. +59
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_resume.go
  52. +563
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/conf.go
  53. +329
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/const.go
  54. +1313
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/convert.go
  55. +36
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/error.go
  56. +103
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/extension.go
  57. +689
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/http.go
  58. +394
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/log.go
  59. +404
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/mime.go
  60. +407
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_base.go
  61. +437
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_bucket.go
  62. +33
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_header.go
  63. +433
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_object.go
  64. +73
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_other.go
  65. +174
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_part.go
  66. +68
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_response.go
  67. +542
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/pool.go
  68. +108
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/progress.go
  69. +242
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/provider.go
  70. +65
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/temporary_createSignedUrl.go
  71. +124
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/temporary_other.go
  72. +758
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/temporary_signedUrl.go
  73. +154
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/trait_base.go
  74. +352
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/trait_bucket.go
  75. +535
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/trait_object.go
  76. +75
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/trait_other.go
  77. +142
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/trait_part.go
  78. +925
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/transfer.go
  79. +372
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/type.go
  80. +661
    -0
      vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/util.go
  81. +370
    -0
      vendor/golang.org/x/net/http/httpproxy/proxy.go
  82. +4
    -2
      vendor/modules.txt
  83. +1
    -1
      vendor/xorm.io/xorm/dialects/postgres.go
  84. +2
    -2
      web_src/vuepages/pages/guide/components/FileUpload.vue

+ 3
- 1
entity/container.go View File

@@ -22,7 +22,9 @@ type ContainerData struct {
IsOverwrite bool `json:"isOverwrite"`
IsNeedUnzip bool `json:"isNeedUnzip"`
IsNeedTensorboard bool `json:"isNeedTensorboard"`
StorageType StorageType
Id string `json:"id"`

StorageType StorageType
}

type ContainerDataType string


+ 3
- 2
entity/storage.go View File

@@ -5,8 +5,9 @@ import "code.gitea.io/gitea/models"
type StorageType string

const (
MINIO StorageType = "MINIO"
OBS StorageType = "OBS"
MINIO StorageType = "MINIO"
OBS StorageType = "OBS"
URCHIN_V2 StorageType = "URCHIN_V2"
)

func GetStorageTypeFromCloudbrainType(cloudbrainType int) StorageType {


+ 3
- 2
go.mod View File

@@ -50,10 +50,13 @@ require (
github.com/gogs/cron v0.0.0-20171120032916-9f6c956d3e14
github.com/gomodule/redigo v2.0.0+incompatible
github.com/google/go-github/v24 v24.0.1
github.com/google/go-querystring v1.0.0
github.com/google/uuid v1.1.1
github.com/gorilla/context v1.1.1
github.com/gorilla/websocket v1.4.0
github.com/hashicorp/go-retryablehttp v0.6.6
github.com/huandu/xstrings v1.3.0
github.com/huaweicloud/huaweicloud-sdk-go-obs v3.25.9+incompatible
github.com/issue9/identicon v1.0.1
github.com/jaytaylor/html2text v0.0.0-20160923191438-8fb95d837f7d
github.com/kballard/go-shellquote v0.0.0-20170619183022-cd60e84ee657
@@ -182,7 +185,6 @@ require (
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.3.2 // indirect
github.com/golang/snappy v0.0.1 // indirect
github.com/google/go-querystring v1.0.0 // indirect
github.com/googleapis/gax-go/v2 v2.0.5 // indirect
github.com/gorilla/css v1.0.0 // indirect
github.com/gorilla/handlers v1.4.2 // indirect
@@ -190,7 +192,6 @@ require (
github.com/gorilla/securecookie v1.1.1 // indirect
github.com/gorilla/sessions v1.2.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.1 // indirect
github.com/hashicorp/go-retryablehttp v0.6.6 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/imdario/mergo v0.3.15 // indirect
github.com/issue9/assert v1.3.2 // indirect


+ 2
- 0
go.sum View File

@@ -439,6 +439,8 @@ github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huandu/xstrings v1.3.0 h1:gvV6jG9dTgFEncxo+AF7PH6MZXi/vZl25owA/8Dg8Wo=
github.com/huandu/xstrings v1.3.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/huaweicloud/huaweicloud-sdk-go-obs v3.25.9+incompatible h1:T9+wBrjfJUrWKppRwXhDNjf6vAJy7DfZYWgkjNbxkIU=
github.com/huaweicloud/huaweicloud-sdk-go-obs v3.25.9+incompatible/go.mod h1:l7VUhRbTKCzdOacdT4oWCwATKyvZqUOlOqr0Ous3k4s=
github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM=
github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=


+ 4
- 0
manager/client/grampus/grampus.go View File

@@ -101,6 +101,10 @@ func getToken() error {
return nil
}

func GetToken() error {
return getToken()
}

func CreateInferenceJob(req models.CreateGrampusInferenceRequest) (*models.GrampusNotebookResponse, error) {
checkSetting()
client := getRestyClient()


+ 2
- 2
models/ai_model_manage.go View File

@@ -1158,7 +1158,7 @@ func GetAimodelByID(id string) (*AiModelManage, error) {
return aimodel, nil
}

func DeleteAimodel(doer *User, uid int64, aimodelId string) error {
func DeleteAimodel(ctx DBContext, doer *User, uid int64, aimodelId string) error {
// In case is a organization.
org, err := GetUserByID(uid)
if err != nil {
@@ -1170,7 +1170,7 @@ func DeleteAimodel(doer *User, uid int64, aimodelId string) error {
}
}

sess := x
sess := ctx.e
aimodel := &AiModelManage{ID: aimodelId, OwnerID: uid}
has, err := sess.Get(aimodel)
if err != nil {


+ 1
- 0
models/cloudbrain.go View File

@@ -2431,6 +2431,7 @@ type GrampusDataset struct {
IsOverwrite bool `json:"isOverwrite"`
IsNeedUnzip bool `json:"isNeedUnzip"`
IsNeedTensorboard bool `json:"isNeedTensorboard"`
Id string `json:"id"`
}

type CreateGrampusJobRequest struct {


+ 14
- 2
models/dataset_registry.go View File

@@ -254,6 +254,17 @@ func CreateDatasetRegistry(dataset *DatasetRegistry, doer *User) error {
return nil
}

func UpdateDatasetPathByID(path string, datasetId string) error {
_, err := x.ID(datasetId).Cols("path").Update(&DatasetRegistry{
Path: path,
})
if err != nil {
return err
}

return nil
}

func CreateDatasetRegistry4Old(dataset *DatasetRegistry, doer *User) error {
var err error
sess := x.NewSession()
@@ -332,7 +343,7 @@ func (dataset *DatasetRegistry) ConvertSubjectAccessContext() *SubjectAccessCont
}
}

func DeleteDatasetRegistry(doer *User, uid int64, datasetID string) error {
func DeleteDatasetRegistry(ctx DBContext, doer *User, uid int64, datasetID string) error {
// In case is a organization.
org, err := GetUserByID(uid)
if err != nil {
@@ -344,7 +355,8 @@ func DeleteDatasetRegistry(doer *User, uid int64, datasetID string) error {
}
}

sess := x
sess := ctx.e

dataset := &DatasetRegistry{ID: datasetID, OwnerID: uid}
has, err := sess.Get(dataset)
if err != nil {


+ 14
- 2
models/hf_model.go View File

@@ -1,11 +1,12 @@
package models

import (
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/timeutil"
"fmt"
"strings"
"time"

"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/timeutil"
"xorm.io/builder"
)

@@ -349,6 +350,17 @@ func DeleteHfFilesByModelId(id string) error {
return nil
}

func DeleteHfFilesByModelIdWithContext(ctx DBContext, id string) error {
sess := ctx.e
_, err := sess.Delete(&HfModelFile{
ModelId: id,
})
if err != nil {
return err
}
return nil
}

func GetHfModelSizeByModelId(modelId string) (int64, error) {
sess := x.NewSession()
defer sess.Close()


+ 18
- 0
modules/setting/setting.go View File

@@ -600,6 +600,11 @@ var (
//storage default config
StorageDefaultType string

//urchin config
UrchinAddress string
UrchinReqTimeoutSecond int64
UrchinMaxConnection int

//modelarts config
ModelArtsHost string
IamHost string
@@ -792,6 +797,10 @@ var (
MAX_PREVIEW_FILE_SIZE int64
ENABLE_ORGANIZATION_UPLOAD_DATASET_FILE bool
HOT_DATASET_LIST_URL string
DATASET_STORAGE_TYPE string

// aimodel
AIMODEL_STORAGE_TYPE string

//resource queue
UseNewSyncQueueAPI bool
@@ -1752,6 +1761,10 @@ func NewContext() {
MAX_PREVIEW_FILE_SIZE = sec.Key("MAX_PREVIEW_FILE_SIZE").MustInt64(100 * 1024 * 1024)
ENABLE_ORGANIZATION_UPLOAD_DATASET_FILE = sec.Key("ENABLE_ORGANIZATION_UPLOAD_DATASET_FILE").MustBool(false)
HOT_DATASET_LIST_URL = sec.Key("HOT_DATASET_LIST_URL").MustString("")
DATASET_STORAGE_TYPE = sec.Key("DATASET_STORAGE_TYPE").MustString("")

sec = Cfg.Section("model")
AIMODEL_STORAGE_TYPE = sec.Key("AIMODEL_STORAGE_TYPE").MustString("")

sec = Cfg.Section("benchmark")
IsBenchmarkEnabled = sec.Key("ENABLED").MustBool(false)
@@ -1810,6 +1823,11 @@ func NewContext() {
UserBasePath = sec.Key("BASE_PATH_USER").MustString("users/")
PROXYURL = sec.Key("PROXY_URL").MustString("")

sec = Cfg.Section("urchin")
UrchinAddress = sec.Key("URCHIN_ADDRESS").MustString("")
UrchinReqTimeoutSecond = sec.Key("URCHIN_REQ_TIMEOUT_SECOND").MustInt64(0)
UrchinMaxConnection = sec.Key("URCHIN_MAX_CONNECTION").MustInt(0)

sec = Cfg.Section("storage")
StorageDefaultType = sec.Key("DEFAULT").MustString("")
if StorageDefaultType == "" {


+ 1
- 0
modules/storage/obs.go View File

@@ -28,6 +28,7 @@ type FileInfo struct {
ParenDir string `json:"ParenDir"`
UUID string `json:"UUID"`
RelativePath string `json:"RelativePath"`
FullPath string `json:"FullPath"`
IsSupportPrivew bool `json:"IsSupportPreview"`
}
type FileInfoList []FileInfo


+ 7
- 4
modules/storage/storage.go View File

@@ -12,6 +12,7 @@ import (
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/obs"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/urchin_v2"
"github.com/minio/minio-go"
)

@@ -48,9 +49,10 @@ func Copy(dstStorage ObjectStorage, dstPath string, srcStorage ObjectStorage, sr

var (
// Attachments represents attachments storage
Attachments ObjectStorage
ObsCli *obs.ObsClient
MinioCore *minio.Core
Attachments ObjectStorage
ObsCli *obs.ObsClient
MinioCore *minio.Core
UrchinClient *urchin_v2.UrchinV2Client
)

// Init init the stoarge
@@ -90,7 +92,8 @@ func Init() error {
//return err
}
log.Info("obs cli inited.")

UrchinClient = urchin_v2.NewUrchinClient(setting.UrchinAddress, setting.UrchinReqTimeoutSecond, setting.UrchinMaxConnection)
log.Info("urchin client inited.")
return nil
}



+ 579
- 0
modules/urchin_v2/client.go View File

@@ -0,0 +1,579 @@
package urchin_v2

import (
"bytes"
"context"
"encoding/xml"
"errors"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"strings"

"code.gitea.io/gitea/modules/log"
urchin_client "code.gitea.io/gitea/modules/urchin_v2/client"
urchin_module "code.gitea.io/gitea/modules/urchin_v2/module"
"github.com/huaweicloud/huaweicloud-sdk-go-obs/obs"
)

// UrchinV2Client wraps the urchin control-plane client together with an OBS
// client that executes the pre-signed URLs urchin hands back.
type UrchinV2Client struct {
	UrchinClient urchin_client.UrchinClient // urchin REST client (object/task management)
	Address      string                     // urchin service address the client was built with
	ObsClient    *obs.ObsClient             // runs pre-signed OBS URLs (get/put/multipart)
}

// NewUrchinClient builds an UrchinV2Client for the urchin service at address.
// reqTimeout is the per-request timeout in seconds; maxConnection bounds the
// urchin HTTP client's connection pool.
func NewUrchinClient(address string,
	reqTimeout int64,
	maxConnection int) *UrchinV2Client {

	// Fixed tuning for the OBS client. The access/secret keys are blank on
	// purpose: every OBS call in this package goes through a pre-signed URL.
	var obsClientSocketTimeout,
		obsClientMaxConnection,
		obsClientMaxRetryCount = 3600, 100, 3
	obsClient, err := obs.New(
		"",
		"",
		"magicalParam",
		obs.WithSignature(obs.SignatureObs),
		obs.WithSocketTimeout(obsClientSocketTimeout),
		obs.WithMaxConnections(obsClientMaxConnection),
		obs.WithMaxRetryCount(obsClientMaxRetryCount))
	if err != nil {
		// Previously discarded; a construction failure would leave ObsClient
		// nil and panic on first use, so at least record why.
		log.Error("new obs client failed: %s", err.Error())
	}

	var urchinClient urchin_client.UrchinClient

	urchinClient.Init(context.Background(), address, reqTimeout, maxConnection)
	return &UrchinV2Client{
		UrchinClient: urchinClient,
		Address:      address,
		ObsClient:    obsClient,
	}
}

// CreateCollection registers a new urchin object named name and returns the
// uuid the service assigned to it.
func (client *UrchinV2Client) CreateCollection(name string) (string, error) {
	ctx := context.WithValue(context.Background(), "X-Request-Id", name)

	req := new(urchin_module.CreateObjectReq)
	req.Name = name
	err, resp := client.UrchinClient.CreateObject(ctx, req)
	if err != nil {
		return "", err
	}
	if !resp.IsSuccess() {
		return "", resp.ToError()
	}
	return resp.ObjUuid, nil
}

// CreatePutObjectSignedUrl asks urchin for a pre-signed PUT URL targeting
// relativePath inside object objUuid.
func (client *UrchinV2Client) CreatePutObjectSignedUrl(objUuid string, relativePath string) (string, error) {
	ctx := context.WithValue(context.Background(), "X-Request-Id", objUuid)

	err, resp := client.UrchinClient.CreatePutObjectSignedUrlWithoutTask(ctx, &urchin_module.CreatePutObjectSignedUrlReqWithoutTask{
		ObjUuid: objUuid,
		Source:  relativePath,
	})
	if err != nil {
		return "", err
	}
	if !resp.IsSuccess() {
		return "", resp.ToError()
	}
	return resp.SignedUrl, nil
}

// UploadLocalDir recursively uploads the contents of localPath into object
// objUuid under targetRelativePath. Directories are created first via MKDIR,
// then recursed into; regular files are uploaded one by one.
//
// NOTE(review): both localPath and targetRelativePath are concatenated with
// plain "+" — callers presumably must pass them with a trailing separator;
// confirm at the call sites.
func (client *UrchinV2Client) UploadLocalDir(objUuid string, localPath, targetRelativePath string) error {
	files, err := readDir(localPath)
	if err != nil {
		log.Error("readDir(%s) failed: %s", localPath, err.Error())
		return err
	}

	for _, file := range files {
		if file.IsDir() {
			// Create the remote directory marker before descending into it.
			key := targetRelativePath + file.Name() + "/"
			err = client.MKDIR(objUuid, key)
			// _, err = storage.ObsCli.PutObject(input)
			if err != nil {
				log.Error("PutObject(%s) failed: %s", key, err.Error())
				return err
			}

			if err = client.UploadLocalDir(objUuid, localPath+file.Name()+"/", targetRelativePath+file.Name()+"/"); err != nil {
				log.Error("UploadLocalDir(%s) failed: %s", file.Name(), err.Error())
				return err
			}
		} else {
			// NOTE(review): the target is "<dir>/<name>/<name>" — each file lands
			// inside a directory named after itself. Looks intentional for the
			// urchin layout, but confirm before relying on it.
			filePath := localPath + file.Name()
			err = client.UploadLocalFile(objUuid, filePath, targetRelativePath+file.Name()+"/"+file.Name())
			if err != nil {
				log.Error("PutFile(%s) failed: %s", filePath, err.Error())
				return err
			}
		}
	}
	return nil

}

// UploadLocalFile uploads the regular file at localPath to targetRelativePath
// inside object objUuid. Non-regular files (symlinks, devices, …) are skipped
// with a logged error and a nil return.
func (client *UrchinV2Client) UploadLocalFile(objUuid string, localPath, targetRelativePath string) error {
	absPath, err := filepath.Abs(filepath.Clean(localPath))
	if err != nil {
		return err
	}

	info, err := os.Stat(absPath)
	if err != nil {
		return err
	}
	if !info.Mode().IsRegular() {
		log.Error("[%s]%s not a regular file", objUuid, localPath)
		return nil
	}

	src, err := os.Open(absPath)
	if err != nil {
		return err
	}
	defer src.Close()

	return client.UploadFile(objUuid, targetRelativePath, src)
}

// readDir returns every entry of dirname in directory order. An io.EOF from
// Readdir is mapped to "no entries" rather than an error.
func readDir(dirname string) ([]os.FileInfo, error) {
	dir, err := os.Open(dirname)
	if err != nil {
		return nil, err
	}

	entries, readErr := dir.Readdir(0)
	dir.Close()
	switch {
	case readErr == nil:
		return entries, nil
	case readErr == io.EOF:
		// TODO: empty folders still cannot be uploaded.
		return nil, nil
	default:
		return nil, readErr
	}
}

// UploadFile streams r to targetRelativePath inside object objUuid through a
// urchin-issued pre-signed PUT URL.
func (client *UrchinV2Client) UploadFile(objUuid string, targetRelativePath string, r io.Reader) error {
	signedUrl, err := client.CreatePutObjectSignedUrl(objUuid, targetRelativePath)
	if err != nil {
		return err
	}

	if _, err = client.ObsClient.PutObjectWithSignedUrl(signedUrl, http.Header{}, r); err != nil {
		return err
	}
	return nil
}

// MKDIR creates an empty object at path inside objUuid, which the backing
// store treats as a directory placeholder.
func (client *UrchinV2Client) MKDIR(objUuid, path string) error {
	signedUrl, err := client.CreatePutObjectSignedUrl(objUuid, path)
	if err != nil {
		return err
	}

	if _, err = client.ObsClient.PutObjectWithSignedUrl(signedUrl, http.Header{}, nil); err != nil {
		return err
	}
	return nil
}

// ReportSizeChanged notifies urchin that the stored size of object objUuid
// has changed so it can refresh its bookkeeping.
func (client *UrchinV2Client) ReportSizeChanged(objUuid string) error {
	ctx := context.Background()
	err, resp := client.UrchinClient.ReportSizeChanged(ctx, &urchin_module.ReportSizeChangedReq{
		ObjUuid: objUuid,
	})
	if err != nil {
		return err
	}
	// Mirror the other wrappers: a transport-level success can still carry an
	// application-level failure (the original dropped resp entirely).
	if !resp.IsSuccess() {
		return resp.ToError()
	}
	return nil
}

// DeleteObjectDeployment force-deletes the deployment of object objUuid.
func (client *UrchinV2Client) DeleteObjectDeployment(objUuid string) error {
	ctx := context.Background()
	var force = new(bool)
	*force = true
	err, resp := client.UrchinClient.DeleteObjectDeployment(ctx, &urchin_module.DeleteObjectDeploymentReq{
		ObjUuid: objUuid,
		Force:   force,
	})
	// Check the transport error first: resp may be nil when err is non-nil,
	// so the original's resp.IsSuccess()-before-err order could panic.
	if err != nil {
		return err
	}
	if !resp.IsSuccess() {
		return resp.ToError()
	}
	return nil
}

// ListPrefixObjectsWithMarkerAndDelimeter lists up to maxKey entries under
// prefix inside object objuuid, using "/" as the delimiter so only a single
// directory level is returned; marker resumes a previous listing.
func (client *UrchinV2Client) ListPrefixObjectsWithMarkerAndDelimeter(objuuid, prefix, marker string, maxKey int32) (*urchin_module.ListObjectsResp, error) {
	ctx := context.WithValue(context.Background(), "X-Request-Id", objuuid)

	delimeter := "/"
	err, resp := client.UrchinClient.ListObjects(ctx, &urchin_module.ListObjectsReq{
		ObjUuid:   objuuid,
		Prefix:    GetStringPointer(prefix),
		Marker:    GetStringPointer(marker),
		MaxKeys:   &maxKey,
		Delimiter: &delimeter,
	})
	if err != nil {
		return nil, err
	}
	if !resp.IsSuccess() {
		return nil, resp.ToError()
	}
	return resp, nil
}

// GetStringPointer returns nil for the empty string and a pointer to s
// otherwise — handy for optional request fields where nil means "unset".
func GetStringPointer(s string) *string {
	if len(s) == 0 {
		return nil
	}
	return &s
}

// GetObject downloads path from object objuuid by asking urchin for a
// pre-signed GET URL and fetching it through the OBS client.
// The caller owns output.Body and must Close it.
func (client *UrchinV2Client) GetObject(objuuid, path string) (*obs.GetObjectOutput, error) {
	ctx := context.WithValue(context.Background(), "X-Request-Id", objuuid)

	err, urlResp := client.UrchinClient.CreateGetObjectSignedUrlWithoutTask(ctx, &urchin_module.CreateGetObjectSignedUrlReqWithoutTask{
		ObjUuid: objuuid,
		Source:  path,
	})
	if err != nil {
		return nil, err
	}
	if !urlResp.IsSuccess() {
		return nil, urlResp.ToError()
	}
	output, err := client.ObsClient.GetObjectWithSignedUrl(
		urlResp.SignedUrl,
		http.Header{})
	// Single error check; the original repeated the same `if err != nil`
	// twice back to back (dead code).
	if err != nil {
		return nil, err
	}
	return output, nil
}

// GetObjectMeta fetches the OBS metadata of path inside object objuuid via a
// urchin pre-signed metadata URL.
func (client *UrchinV2Client) GetObjectMeta(objuuid, path string) (*obs.GetObjectMetadataOutput, error) {
	ctx := context.WithValue(context.Background(), "X-Request-Id", objuuid)
	err, urlResp := client.UrchinClient.CreateGetObjectMetadataSignedUrlWithoutTask(ctx, &urchin_module.CreateGetObjectMetadataSignedUrlReqWithoutTask{
		ObjUuid: objuuid,
		Source:  path,
	})
	if err != nil {
		return nil, err
	}
	if !urlResp.IsSuccess() {
		return nil, urlResp.ToError()
	}
	output, err := client.ObsClient.GetObjectMetadataWithSignedUrl(
		urlResp.SignedUrl,
		http.Header{})
	// The original discarded this error and only tested output == nil,
	// hiding the real cause of a failed lookup.
	if err != nil {
		return nil, err
	}
	if output == nil {
		return nil, fmt.Errorf("GetObjectMeta(%s:%s) failed", objuuid, path)
	}
	return output, nil
}

// Task completion results reported back to urchin via FinishTask.
const (
	TASKSUCCESS int32 = 1 // task finished successfully
	TASKFAILED  int32 = 2 // task failed
)

// FinishSuccessTask reports task taskId to urchin as completed successfully.
func (client *UrchinV2Client) FinishSuccessTask(taskId int32) error {
	finishReq := &urchin_module.FinishTaskReq{
		TaskId: taskId,
		Result: TASKSUCCESS,
	}
	err, resp := client.UrchinClient.FinishTask(context.Background(), finishReq)
	if err != nil {
		return err
	}
	if !resp.IsSuccess() {
		return resp.ToError()
	}
	return nil
}

// FinishFailedTask reports task taskId to urchin as failed.
func (client *UrchinV2Client) FinishFailedTask(taskId int32) error {
	ctx := context.WithValue(context.Background(), "X-Request-Id", fmt.Sprint(taskId))

	finishReq := &urchin_module.FinishTaskReq{
		TaskId: taskId,
		Result: TASKFAILED,
	}
	err, resp := client.UrchinClient.FinishTask(ctx, finishReq)
	if err != nil {
		return err
	}
	if !resp.IsSuccess() {
		return resp.ToError()
	}
	return nil
}

// GetObjectSignedUrl asks urchin for a pre-signed GET URL for path inside
// object objuuid and returns it without downloading anything.
func (client *UrchinV2Client) GetObjectSignedUrl(objuuid, path string) (string, error) {
	ctx := context.WithValue(context.Background(), "X-Request-Id", objuuid)

	err, resp := client.UrchinClient.CreateGetObjectSignedUrlWithoutTask(ctx, &urchin_module.CreateGetObjectSignedUrlReqWithoutTask{
		ObjUuid: objuuid,
		Source:  path,
	})
	if err != nil {
		return "", err
	}
	if !resp.IsSuccess() {
		return "", resp.ToError()
	}
	return resp.SignedUrl, nil
}

// ListPrefixObjectsWithMarker lists up to maxKey entries under prefix inside
// object objuuid without a delimiter (recursive listing); marker resumes a
// previous page.
func (client *UrchinV2Client) ListPrefixObjectsWithMarker(objuuid string, marker string, prefix string, maxKey int32) (*urchin_module.ListObjectsResp, error) {
	ctx := context.WithValue(context.Background(), "X-Request-Id", objuuid)

	err, resp := client.UrchinClient.ListObjects(ctx, &urchin_module.ListObjectsReq{
		ObjUuid: objuuid,
		Prefix:  GetStringPointer(prefix),
		Marker:  GetStringPointer(marker),
		MaxKeys: &maxKey,
	})
	if err != nil {
		return nil, err
	}
	if !resp.IsSuccess() {
		return nil, resp.ToError()
	}
	return resp, nil
}

// CopyObject copies sourcePath from sourceObjUuid to targetPath inside
// targetObjUuid by downloading and re-uploading the data. (A later urchin
// version is expected to support a direct server-side copy.)
func (client *UrchinV2Client) CopyObject(sourceObjUuid, sourcePath, targetObjUuid, targetPath string) error {
	res, err := client.GetObject(sourceObjUuid, sourcePath)
	if err != nil {
		return err
	}
	// Close the download stream when done — the original leaked the body,
	// preventing HTTP connection reuse.
	defer res.Body.Close()
	return client.UploadFile(targetObjUuid, targetPath, res.Body)
}

// DeleteFile removes the single entry sourcePath from object objuuid.
func (client *UrchinV2Client) DeleteFile(objuuid, sourcePath string) error {
	ctx := context.WithValue(context.Background(), "X-Request-Id", objuuid)

	err, resp := client.UrchinClient.DeleteFile(ctx, &urchin_module.DeleteFileReq{
		ObjUuid: objuuid,
		Source:  sourcePath,
	})
	if err != nil {
		return err
	}
	if !resp.IsSuccess() {
		return resp.ToError()
	}
	return nil
}

// DeleteObject deletes the whole urchin object identified by objuuid.
func (client *UrchinV2Client) DeleteObject(objuuid string) error {
	ctx := context.WithValue(context.Background(), "X-Request-Id", objuuid)

	err, resp := client.UrchinClient.DeleteObject(ctx, &urchin_module.DeleteObjectReq{
		ObjUuid: objuuid,
	})
	if err != nil {
		return err
	}
	if !resp.IsSuccess() {
		return resp.ToError()
	}
	return nil
}

// dirOf returns the parent directory of name, or "/" when the cleaned path
// contains no separator at all.
func dirOf(name string) string {
	cleaned := filepath.Clean(name)
	if strings.ContainsRune(cleaned, filepath.Separator) {
		return filepath.Dir(cleaned)
	}
	return "/"
}

// InitiateMultipartUpload starts a multipart upload for path inside object
// objuuid and returns the OBS upload id used for the subsequent part uploads.
func (client *UrchinV2Client) InitiateMultipartUpload(objuuid, path string) (string, error) {
	ctx := context.WithValue(context.Background(), "X-Request-Id", objuuid)
	err, urlResp := client.UrchinClient.CreateInitiateMultipartUploadSignedUrlWithoutTask(ctx, &urchin_module.CreateInitiateMultipartUploadSignedUrlReqWithoutTask{
		ObjUuid: objuuid,
		Source:  path,
	})
	if err != nil {
		return "", err
	}
	if !urlResp.IsSuccess() {
		return "", urlResp.ToError()
	}
	output, err := client.ObsClient.InitiateMultipartUploadWithSignedUrl(
		urlResp.SignedUrl,
		http.Header{})
	if err != nil {
		return "", err
	}
	return output.UploadId, nil
}

// CreateMultipartUploadSignedUrl returns a pre-signed URL for uploading part
// partNumber of multipart upload uploadId targeting source inside objuuid.
func (client *UrchinV2Client) CreateMultipartUploadSignedUrl(objuuid, uploadId, source string, partNumber int) (string, error) {
	ctx := context.WithValue(context.Background(), "X-Request-Id", objuuid)
	err, resp := client.UrchinClient.CreateUploadPartSignedUrlWithoutTask(ctx, &urchin_module.CreateUploadPartSignedUrlReqWithoutTask{
		ObjUuid:    objuuid,
		Source:     source,
		UploadId:   uploadId,
		PartNumber: int32(partNumber),
	})
	if err != nil {
		return "", err
	}
	if !resp.IsSuccess() {
		return "", resp.ToError()
	}
	return resp.SignedUrl, nil
}

// CompleteMultiPartUpload finalises a multipart upload of source inside
// object objuuid. It first verifies that exactly totalChunks parts were
// uploaded, then posts the CompleteMultipartUpload XML body to a
// urchin-issued pre-signed URL.
func (client *UrchinV2Client) CompleteMultiPartUpload(objuuid, uploadId, source string, totalChunks int) error {
	ctx := context.WithValue(context.Background(), "X-Request-Id", objuuid)
	err, urlOutput := client.UrchinClient.CreateCompleteMultipartUploadSignedUrlWithoutTask(ctx, &urchin_module.CreateCompleteMultipartUploadSignedUrlReqWithoutTask{
		ObjUuid:  objuuid,
		Source:   source,
		UploadId: uploadId,
	})
	if err != nil {
		return err
	}
	if !urlOutput.IsSuccess() {
		return urlOutput.ToError()
	}

	allParts, err := client.ListAllParts(objuuid, uploadId, source)
	if err != nil {
		return err
	}

	if len(allParts.Parts) != totalChunks {
		log.Error("listAllParts number(%d) is not equal the set total chunk number(%d)", len(allParts.Parts), totalChunks)
		return errors.New("the parts is not complete")
	}

	// Local shape of a <Part> element in the CompleteMultipartUpload body.
	type completePart struct {
		PartNumber int    `xml:"PartNumber"`
		ETag       string `xml:"ETag"`
	}

	parts := make([]completePart, 0, len(allParts.Parts))
	for _, p := range allParts.Parts {
		parts = append(parts, completePart{
			PartNumber: p.PartNumber,
			ETag:       p.ETag,
		})
	}

	xmlBody, err := xml.MarshalIndent(struct {
		XMLName xml.Name       `xml:"CompleteMultipartUpload"`
		Parts   []completePart `xml:"Part"`
	}{
		Parts: parts,
	}, "", " ")
	if err != nil {
		// Previously ignored: a marshal failure would have silently sent an
		// empty request body.
		return err
	}
	data := bytes.NewReader(xmlBody)
	output, err := client.ObsClient.CompleteMultipartUploadWithSignedUrl(
		urlOutput.SignedUrl,
		http.Header{}, data)
	if err != nil {
		// Previously ignored: surface the OBS error instead of the generic
		// "output empty" message below.
		return err
	}

	if output == nil {
		return fmt.Errorf("CompleteMultipartUploadWithSignedUrl output empty")
	}

	return nil
}

// ListAllParts pages through every uploaded part of uploadID for path inside
// object objuuid (1000 parts per page) and returns the aggregated list.
func (client *UrchinV2Client) ListAllParts(objuuid, uploadID, path string) (output *obs.ListPartsOutput, err error) {
	ctx := context.WithValue(context.Background(), "X-Request-Id", objuuid)

	output = &obs.ListPartsOutput{}
	var (
		partNumberMarker int32
		maxListParts     int32 = 1000
	)

	for {
		err, urlResp := client.UrchinClient.CreateListPartsSignedUrlWithoutTask(ctx, &urchin_module.CreateListPartsSignedUrlReqWithoutTask{
			ObjUuid:          objuuid,
			Source:           path,
			UploadId:         uploadID,
			PartNumberMarker: &partNumberMarker,
			MaxParts:         &maxListParts,
		})
		if err != nil {
			return nil, err
		}
		if !urlResp.IsSuccess() {
			return nil, urlResp.ToError()
		}
		page, err := client.ObsClient.ListPartsWithSignedUrl(
			urlResp.SignedUrl,
			http.Header{})
		if err != nil {
			return nil, err
		}
		partNumberMarker = int32(page.NextPartNumberMarker)
		for _, partInfo := range page.Parts {
			output.Parts = append(output.Parts, obs.Part{
				PartNumber: partInfo.PartNumber,
				ETag:       partInfo.ETag,
			})
		}

		if !page.IsTruncated {
			return output, nil
		}
	}
}

+ 1637
- 0
modules/urchin_v2/client/urchin_client.go View File

@@ -0,0 +1,1637 @@
package client

import (
"context"
"crypto/tls"
"encoding/json"
"fmt"
"net"
"net/http"
"strings"
"time"

"code.gitea.io/gitea/modules/setting"
. "code.gitea.io/gitea/modules/urchin_v2/common"
. "code.gitea.io/gitea/modules/urchin_v2/module"
"github.com/go-resty/resty/v2"
"github.com/google/go-querystring/query"
"github.com/hashicorp/go-retryablehttp"
)

// UClient is the shared process-wide urchin client instance.
var UClient UrchinClient

// UrchinClient talks to the urchin REST service with automatic retries and a
// default header set that carries the auth token.
type UrchinClient struct {
	addr         string                // base address of the urchin service
	header       http.Header           // default headers (holds the auth token)
	urchinClient *retryablehttp.Client // retrying HTTP client with tuned transport
}

// Init configures the urchin client: base address, a transport with bounded
// dial/TLS/idle timeouts and a per-host idle-connection cap, a retry policy
// (3 retries, 1-5s backoff), the overall request timeout, and then fetches an
// initial auth token.
//
// NOTE(review): the GetToken error is silently ignored here, so a failed
// initial login only surfaces on the first authenticated request.
func (u *UrchinClient) Init(
	ctx context.Context,
	address string,
	reqTimeout int64,
	maxConnection int) {

	u.addr = address

	timeout := time.Duration(reqTimeout) * time.Second

	transport := &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		DialContext: func(
			ctx context.Context,
			network,
			addr string) (net.Conn, error) {
			dialer := &net.Dialer{
				Timeout:   3 * time.Second,  // connect timeout
				KeepAlive: 30 * time.Second, // TCP keep-alive interval
			}
			return dialer.DialContext(ctx, network, addr)
		},
		// Server certificates are NOT verified — acceptable only because the
		// urchin endpoint is an internal service.
		TLSClientConfig:     &tls.Config{InsecureSkipVerify: true},
		TLSHandshakeTimeout: 10 * time.Second,
		IdleConnTimeout:     90 * time.Second,
		MaxIdleConnsPerHost: maxConnection,
	}

	u.urchinClient = retryablehttp.NewClient()
	u.urchinClient.RetryMax = 3
	u.urchinClient.RetryWaitMin = 1 * time.Second
	u.urchinClient.RetryWaitMax = 5 * time.Second
	u.urchinClient.HTTPClient.Transport = transport
	u.urchinClient.HTTPClient.Timeout = timeout

	u.header = make(http.Header)

	u.GetToken()

}

// GetTokenResult is the response payload of the Grampus token endpoint.
type GetTokenResult struct {
	Token      string `json:"token"`
	Expiration int64  `json:"expiration"` // expiry timestamp; not read anywhere in this file
}

// GetTokenParams is the credential payload sent to the token endpoint.
type GetTokenParams struct {
	UserName string `json:"username"`
	Password string `json:"password"`
}

// errorIllegalToken is the urchin response code meaning the auth token was
// rejected; client methods refresh the token and retry on this code.
const errorIllegalToken = 1005

// GetToken authenticates against the Grampus openapi with the configured
// credentials and caches the returned token in the client's default request
// headers. It also refreshes the package-level HOST value.
func (u *UrchinClient) GetToken() error {
	HOST = strings.TrimSuffix(setting.Grampus.Host, "/")

	client := getRestyClient()
	params := GetTokenParams{
		UserName: setting.Grampus.UserName,
		Password: setting.Grampus.Password,
	}

	var result GetTokenResult
	res, err := client.R().
		SetHeader("Content-Type", "application/json").
		SetBody(params).
		SetResult(&result).
		// Reuse the HOST computed above instead of trimming the same
		// setting a second time.
		Post(HOST + "/openapi/v1/token")
	if err != nil {
		return fmt.Errorf("resty getToken: %v", err)
	}

	if res.StatusCode() != http.StatusOK {
		return fmt.Errorf("getToken failed:%s", res.String())
	}
	u.header.Set(UrchinClientHeaderToken, result.Token)
	return nil
}

var (
	restyClient *resty.Client // shared resty client, lazily created by getRestyClient
	HOST        string        // Grampus host with trailing slash trimmed; set by GetToken
	TOKEN       string        // NOTE(review): never written in this file — possibly dead
)

// getRestyClient lazily builds the shared resty client used for the token
// endpoint. TLS verification is disabled, matching the transport in Init.
//
// NOTE(review): this lazy init is not goroutine-safe — concurrent first calls
// can race on restyClient. A package-level sync.Once would fix it; confirm
// callers are effectively single-threaded at startup.
func getRestyClient() *resty.Client {
	if restyClient == nil {
		restyClient = resty.New()
		restyClient.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: true})
	}
	return restyClient
}

// CreateInitiateMultipartUploadSignedUrl requests a pre-signed URL for
// initiating a task-bound multipart upload. On an illegal-token response the
// cached token is refreshed and the call retried once (the previous
// unconditional goto could loop forever on persistent rejections). Uses the
// package's legacy (err, resp) result order.
func (u *UrchinClient) CreateInitiateMultipartUploadSignedUrl(
	ctx context.Context,
	req *CreateInitiateMultipartUploadSignedUrlReq) (
	err error, resp *CreateSignedUrlResp) {

	reqBody, err := json.Marshal(req)
	if nil != err {
		return err, resp
	}

	resp = new(CreateSignedUrlResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientCreateInitiateMultipartUploadSignedUrlInterface,
		http.MethodPost,
		u.header,
		reqBody,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// CreateInitiateMultipartUploadSignedUrlWithoutTask requests a pre-signed URL
// for initiating a multipart upload that is not bound to a task. Refreshes an
// illegal token and retries once (bounded; the old goto retried forever).
func (u *UrchinClient) CreateInitiateMultipartUploadSignedUrlWithoutTask(
	ctx context.Context,
	req *CreateInitiateMultipartUploadSignedUrlReqWithoutTask) (
	err error, resp *CreateSignedUrlResp) {

	reqBody, err := json.Marshal(req)
	if nil != err {
		return err, resp
	}

	resp = new(CreateSignedUrlResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientCreateInitiateMultipartUploadSignedUrlInterfaceWithoutTask,
		http.MethodPost,
		u.header,
		reqBody,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// CreateUploadPartSignedUrl requests a pre-signed URL for uploading one part
// of a task-bound multipart upload. Refreshes an illegal token and retries
// once (bounded; the old goto retried forever).
func (u *UrchinClient) CreateUploadPartSignedUrl(
	ctx context.Context,
	req *CreateUploadPartSignedUrlReq) (
	err error, resp *CreateSignedUrlResp) {

	reqBody, err := json.Marshal(req)
	if nil != err {
		return err, resp
	}

	resp = new(CreateSignedUrlResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientCreateUploadPartSignedUrlInterface,
		http.MethodPost,
		u.header,
		reqBody,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// CreateUploadPartSignedUrlWithoutTask requests a pre-signed URL for
// uploading one part of a task-less multipart upload. Refreshes an illegal
// token and retries once (bounded; the old goto retried forever).
func (u *UrchinClient) CreateUploadPartSignedUrlWithoutTask(
	ctx context.Context,
	req *CreateUploadPartSignedUrlReqWithoutTask) (
	err error, resp *CreateSignedUrlResp) {

	reqBody, err := json.Marshal(req)
	if nil != err {
		return err, resp
	}

	resp = new(CreateSignedUrlResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientCreateUploadPartSignedUrlInterfaceWithoutTask,
		http.MethodPost,
		u.header,
		reqBody,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// CreateListPartsSignedUrl requests a pre-signed URL for listing the parts of
// a task-bound multipart upload. Refreshes an illegal token and retries once
// (bounded; the old goto retried forever).
func (u *UrchinClient) CreateListPartsSignedUrl(
	ctx context.Context,
	req *CreateListPartsSignedUrlReq) (
	err error, resp *CreateSignedUrlResp) {

	reqBody, err := json.Marshal(req)
	if nil != err {
		return err, resp
	}

	resp = new(CreateSignedUrlResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientCreateListPartsSignedUrlInterface,
		http.MethodPost,
		u.header,
		reqBody,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// CreateListPartsSignedUrlWithoutTask requests a pre-signed URL for listing
// the parts of a task-less multipart upload. Refreshes an illegal token and
// retries once (bounded; the old goto retried forever).
func (u *UrchinClient) CreateListPartsSignedUrlWithoutTask(
	ctx context.Context,
	req *CreateListPartsSignedUrlReqWithoutTask) (
	err error, resp *CreateSignedUrlResp) {

	reqBody, err := json.Marshal(req)
	if nil != err {
		return err, resp
	}

	resp = new(CreateSignedUrlResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientCreateListPartsSignedUrlInterfaceWithoutTask,
		http.MethodPost,
		u.header,
		reqBody,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// CreateCompleteMultipartUploadSignedUrl requests a pre-signed URL for
// completing a task-bound multipart upload. Refreshes an illegal token and
// retries once (bounded; the old goto retried forever).
func (u *UrchinClient) CreateCompleteMultipartUploadSignedUrl(
	ctx context.Context,
	req *CreateCompleteMultipartUploadSignedUrlReq) (
	err error, resp *CreateSignedUrlResp) {

	reqBody, err := json.Marshal(req)
	if nil != err {
		return err, resp
	}

	resp = new(CreateSignedUrlResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientCreateCompleteMultipartUploadSignedUrlInterface,
		http.MethodPost,
		u.header,
		reqBody,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// CreateCompleteMultipartUploadSignedUrlWithoutTask requests a pre-signed URL
// for completing a task-less multipart upload. Refreshes an illegal token and
// retries once (bounded; the old goto retried forever).
func (u *UrchinClient) CreateCompleteMultipartUploadSignedUrlWithoutTask(
	ctx context.Context,
	req *CreateCompleteMultipartUploadSignedUrlReqWithoutTask) (
	err error, resp *CreateSignedUrlResp) {

	reqBody, err := json.Marshal(req)
	if nil != err {
		return err, resp
	}

	resp = new(CreateSignedUrlResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientCreateCompleteMultipartUploadSignedUrlInterfaceWithoutTask,
		http.MethodPost,
		u.header,
		reqBody,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// CreateAbortMultipartUploadSignedUrl requests a pre-signed URL for aborting
// a multipart upload. Refreshes an illegal token and retries once (bounded;
// the old goto retried forever).
func (u *UrchinClient) CreateAbortMultipartUploadSignedUrl(
	ctx context.Context,
	req *CreateAbortMultipartUploadSignedUrlReq) (
	err error, resp *CreateSignedUrlResp) {

	reqBody, err := json.Marshal(req)
	if nil != err {
		return err, resp
	}

	resp = new(CreateSignedUrlResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientCreateAbortMultipartUploadSignedUrlInterface,
		http.MethodPost,
		u.header,
		reqBody,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// CreatePutObjectSignedUrl requests a pre-signed URL for a task-bound
// single-shot object PUT. Refreshes an illegal token and retries once
// (bounded; the old goto retried forever).
func (u *UrchinClient) CreatePutObjectSignedUrl(
	ctx context.Context,
	req *CreatePutObjectSignedUrlReq) (
	err error, resp *CreateSignedUrlResp) {

	reqBody, err := json.Marshal(req)
	if nil != err {
		return err, resp
	}

	resp = new(CreateSignedUrlResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientCreatePutObjectSignedUrlInterface,
		http.MethodPost,
		u.header,
		reqBody,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// CreatePutObjectSignedUrlWithoutTask requests a pre-signed URL for a
// task-less single-shot object PUT. Refreshes an illegal token and retries
// once (bounded; the old goto retried forever).
func (u *UrchinClient) CreatePutObjectSignedUrlWithoutTask(
	ctx context.Context,
	req *CreatePutObjectSignedUrlReqWithoutTask) (
	err error, resp *CreateSignedUrlResp) {

	reqBody, err := json.Marshal(req)
	if nil != err {
		return err, resp
	}

	resp = new(CreateSignedUrlResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientCreatePutObjectSignedUrlInterfaceWithoutTask,
		http.MethodPost,
		u.header,
		reqBody,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// CreateGetObjectMetadataSignedUrl requests a pre-signed URL for fetching
// task-bound object metadata. Refreshes an illegal token and retries once
// (bounded; the old goto retried forever).
func (u *UrchinClient) CreateGetObjectMetadataSignedUrl(
	ctx context.Context,
	req *CreateGetObjectMetadataSignedUrlReq) (
	err error, resp *CreateSignedUrlResp) {

	reqBody, err := json.Marshal(req)
	if nil != err {
		return err, resp
	}

	resp = new(CreateSignedUrlResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientCreateGetObjectMetadataSignedUrlInterface,
		http.MethodPost,
		u.header,
		reqBody,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// CreateGetObjectMetadataSignedUrlWithoutTask requests a pre-signed URL for
// fetching task-less object metadata. Refreshes an illegal token and retries
// once (bounded; the old goto retried forever).
func (u *UrchinClient) CreateGetObjectMetadataSignedUrlWithoutTask(
	ctx context.Context,
	req *CreateGetObjectMetadataSignedUrlReqWithoutTask) (
	err error, resp *CreateSignedUrlResp) {

	reqBody, err := json.Marshal(req)
	if nil != err {
		return err, resp
	}

	resp = new(CreateSignedUrlResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientCreateGetObjectMetadataSignedUrlInterfaceWithoutTask,
		http.MethodPost,
		u.header,
		reqBody,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// CreateGetObjectSignedUrl requests a pre-signed URL for a task-bound object
// download. Refreshes an illegal token and retries once (bounded; the old
// goto retried forever).
func (u *UrchinClient) CreateGetObjectSignedUrl(
	ctx context.Context,
	req *CreateGetObjectSignedUrlReq) (
	err error, resp *CreateSignedUrlResp) {

	reqBody, err := json.Marshal(req)
	if nil != err {
		return err, resp
	}

	resp = new(CreateSignedUrlResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientCreateGetObjectSignedUrlInterface,
		http.MethodPost,
		u.header,
		reqBody,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// CreateGetObjectSignedUrlWithoutTask requests a pre-signed URL for a
// task-less object download. Refreshes an illegal token and retries once
// (bounded; the old goto retried forever).
func (u *UrchinClient) CreateGetObjectSignedUrlWithoutTask(
	ctx context.Context,
	req *CreateGetObjectSignedUrlReqWithoutTask) (
	err error, resp *CreateSignedUrlResp) {

	reqBody, err := json.Marshal(req)
	if nil != err {
		return err, resp
	}

	resp = new(CreateSignedUrlResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientCreateGetObjectSignedUrlInterfaceWithoutTask,
		http.MethodPost,
		u.header,
		reqBody,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// CreateListObjectsSignedUrl requests a pre-signed URL for a task-bound
// object listing. Refreshes an illegal token and retries once (bounded; the
// old goto retried forever).
func (u *UrchinClient) CreateListObjectsSignedUrl(
	ctx context.Context,
	req *CreateListObjectsSignedUrlReq) (
	err error, resp *CreateSignedUrlResp) {

	reqBody, err := json.Marshal(req)
	if nil != err {
		return err, resp
	}

	resp = new(CreateSignedUrlResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientCreateListObjectsSignedUrlInterface,
		http.MethodPost,
		u.header,
		reqBody,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// CreateListObjectsSignedUrlWithoutTask requests a pre-signed URL for a
// task-less object listing. Refreshes an illegal token and retries once
// (bounded; the old goto retried forever).
func (u *UrchinClient) CreateListObjectsSignedUrlWithoutTask(
	ctx context.Context,
	req *CreateListObjectsSignedUrlReqWithoutTask) (
	err error, resp *CreateSignedUrlResp) {

	reqBody, err := json.Marshal(req)
	if nil != err {
		return err, resp
	}

	resp = new(CreateSignedUrlResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientCreateListObjectsSignedUrlInterfaceWithoutTask,
		http.MethodPost,
		u.header,
		reqBody,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// UploadObject starts a server-side object upload task. Refreshes an illegal
// token and retries once (bounded; the old goto retried forever).
func (u *UrchinClient) UploadObject(
	ctx context.Context,
	req *UploadObjectReq) (
	err error, resp *UploadObjectResp) {

	reqBody, err := json.Marshal(req)
	if nil != err {
		return err, resp
	}

	resp = new(UploadObjectResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientUploadObjectInterface,
		http.MethodPost,
		u.header,
		reqBody,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// UploadFile starts a server-side file upload task. Refreshes an illegal
// token and retries once (bounded; the old goto retried forever).
func (u *UrchinClient) UploadFile(
	ctx context.Context,
	req *UploadFileReq) (
	err error, resp *UploadFileResp) {

	reqBody, err := json.Marshal(req)
	if nil != err {
		return err, resp
	}

	resp = new(UploadFileResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientUploadFileInterface,
		http.MethodPost,
		u.header,
		reqBody,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// DownloadObject starts a server-side object download task (PUT). Refreshes
// an illegal token and retries once (bounded; the old goto retried forever).
func (u *UrchinClient) DownloadObject(
	ctx context.Context,
	req *DownloadObjectReq) (
	err error, resp *DownloadObjectResp) {

	reqBody, err := json.Marshal(req)
	if nil != err {
		return err, resp
	}

	resp = new(DownloadObjectResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientDownloadObjectInterface,
		http.MethodPut,
		u.header,
		reqBody,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// DownloadFile starts a server-side file download task (PUT). Refreshes an
// illegal token and retries once (bounded; the old goto retried forever).
func (u *UrchinClient) DownloadFile(
	ctx context.Context,
	req *DownloadFileReq) (
	err error, resp *DownloadFileResp) {

	reqBody, err := json.Marshal(req)
	if nil != err {
		return err, resp
	}

	resp = new(DownloadFileResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientDownloadFileInterface,
		http.MethodPut,
		u.header,
		reqBody,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// LoadObject starts a server-side object load task (PUT). Refreshes an
// illegal token and retries once (bounded; the old goto retried forever).
func (u *UrchinClient) LoadObject(
	ctx context.Context,
	req *LoadObjectReq) (
	err error, resp *LoadObjectResp) {

	reqBody, err := json.Marshal(req)
	if nil != err {
		return err, resp
	}

	resp = new(LoadObjectResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientLoadObjectInterface,
		http.MethodPut,
		u.header,
		reqBody,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// MigrateObject starts a server-side object migration task (PUT). Refreshes
// an illegal token and retries once (bounded; the old goto retried forever).
func (u *UrchinClient) MigrateObject(
	ctx context.Context,
	req *MigrateObjectReq) (
	err error, resp *MigrateObjectResp) {

	reqBody, err := json.Marshal(req)
	if nil != err {
		return err, resp
	}

	resp = new(MigrateObjectResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientMigrateObjectInterface,
		http.MethodPut,
		u.header,
		reqBody,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// CopyObject starts a server-side object copy task. Refreshes an illegal
// token and retries once (bounded; the old goto retried forever).
func (u *UrchinClient) CopyObject(
	ctx context.Context,
	req *CopyObjectReq) (
	err error, resp *CopyObjectResp) {

	reqBody, err := json.Marshal(req)
	if nil != err {
		return err, resp
	}

	resp = new(CopyObjectResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientCopyObjectInterface,
		http.MethodPost,
		u.header,
		reqBody,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// GetObject fetches a single object's record; req is encoded as URL query
// parameters. Refreshes an illegal token and retries once (bounded; the old
// goto retried forever).
func (u *UrchinClient) GetObject(
	ctx context.Context,
	req *GetObjectReq) (
	err error, resp *GetObjectResp) {

	values, err := query.Values(req)
	if nil != err {
		return err, resp
	}

	resp = new(GetObjectResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientGetObjectInterface+"?"+values.Encode(),
		http.MethodGet,
		u.header,
		nil,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// DeleteObject deletes an object; req is encoded as URL query parameters.
// Refreshes an illegal token and retries once (bounded; the old goto retried
// forever).
func (u *UrchinClient) DeleteObject(
	ctx context.Context,
	req *DeleteObjectReq) (
	err error, resp *BaseResp) {

	values, err := query.Values(req)
	if nil != err {
		return err, resp
	}

	resp = new(BaseResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientDeleteObjectInterface+"?"+values.Encode(),
		http.MethodDelete,
		u.header,
		nil,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// DeleteObjectDeployment removes an object deployment; req is encoded as URL
// query parameters. Refreshes an illegal token and retries once (bounded; the
// old goto retried forever).
func (u *UrchinClient) DeleteObjectDeployment(
	ctx context.Context,
	req *DeleteObjectDeploymentReq) (
	err error, resp *BaseResp) {

	values, err := query.Values(req)
	if nil != err {
		return err, resp
	}

	resp = new(BaseResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientDeleteObjectDeploymentInterface+"?"+values.Encode(),
		http.MethodDelete,
		u.header,
		nil,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// DeleteFile deletes a single file from an object; req is encoded as URL
// query parameters. Refreshes an illegal token and retries once (bounded; the
// old goto retried forever).
func (u *UrchinClient) DeleteFile(
	ctx context.Context,
	req *DeleteFileReq) (
	err error, resp *BaseResp) {

	values, err := query.Values(req)
	if nil != err {
		return err, resp
	}

	resp = new(BaseResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientDeleteFileInterface+"?"+values.Encode(),
		http.MethodDelete,
		u.header,
		nil,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// CreateObject registers a new object record. Refreshes an illegal token and
// retries once (bounded; the old goto retried forever).
func (u *UrchinClient) CreateObject(
	ctx context.Context,
	req *CreateObjectReq) (
	err error, resp *CreateObjectResp) {

	reqBody, err := json.Marshal(req)
	if nil != err {
		return err, resp
	}

	resp = new(CreateObjectResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientCreateObjectInterface,
		http.MethodPost,
		u.header,
		reqBody,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// ListObjects lists object records; req is encoded as URL query parameters.
// Refreshes an illegal token and retries once (bounded; the old goto retried
// forever).
func (u *UrchinClient) ListObjects(
	ctx context.Context,
	req *ListObjectsReq) (
	err error, resp *ListObjectsResp) {

	values, err := query.Values(req)
	if nil != err {
		return err, resp
	}

	resp = new(ListObjectsResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientListObjectsInterface+"?"+values.Encode(),
		http.MethodGet,
		u.header,
		nil,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// GetObjectMetadata fetches object metadata; req is encoded as URL query
// parameters. Refreshes an illegal token and retries once (bounded; the old
// goto retried forever).
func (u *UrchinClient) GetObjectMetadata(
	ctx context.Context,
	req *GetObjectMetadataReq) (
	err error, resp *GetObjectMetadataResp) {

	values, err := query.Values(req)
	if nil != err {
		return err, resp
	}

	resp = new(GetObjectMetadataResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientGetObjectMetadataInterface+"?"+values.Encode(),
		http.MethodGet,
		u.header,
		nil,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// ListParts lists uploaded multipart-upload parts; req is encoded as URL
// query parameters. Refreshes an illegal token and retries once (bounded; the
// old goto retried forever).
func (u *UrchinClient) ListParts(
	ctx context.Context,
	req *ListPartsReq) (
	err error, resp *ListPartsResp) {

	values, err := query.Values(req)
	if nil != err {
		return err, resp
	}

	resp = new(ListPartsResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientListPartsInterface+"?"+values.Encode(),
		http.MethodGet,
		u.header,
		nil,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// GetTask fetches a task record; req is encoded as URL query parameters.
// Refreshes an illegal token and retries once (bounded; the old goto retried
// forever).
func (u *UrchinClient) GetTask(
	ctx context.Context,
	req *GetTaskReq) (
	err error, resp *GetTaskResp) {

	values, err := query.Values(req)
	if nil != err {
		return err, resp
	}

	resp = new(GetTaskResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientGetTaskInterface+"?"+values.Encode(),
		http.MethodGet,
		u.header,
		nil,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// FinishTask marks a task finished (PUT). Refreshes an illegal token and
// retries once (bounded; the old goto retried forever).
func (u *UrchinClient) FinishTask(
	ctx context.Context,
	req *FinishTaskReq) (
	err error, resp *BaseResp) {

	reqBody, err := json.Marshal(req)
	if nil != err {
		return err, resp
	}

	resp = new(BaseResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientFinishTaskInterface,
		http.MethodPut,
		u.header,
		reqBody,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// RetryTask asks the server to retry a failed task (PUT). Refreshes an
// illegal token and retries the request once (bounded; the old goto retried
// forever).
func (u *UrchinClient) RetryTask(
	ctx context.Context,
	req *RetryTaskReq) (
	err error, resp *BaseResp) {

	reqBody, err := json.Marshal(req)
	if nil != err {
		return err, resp
	}

	resp = new(BaseResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientRetryTaskInterface,
		http.MethodPut,
		u.header,
		reqBody,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// ReportTaskHeartbeat reports liveness for a running task (PUT). Refreshes an
// illegal token and retries once (bounded; the old goto retried forever).
func (u *UrchinClient) ReportTaskHeartbeat(
	ctx context.Context,
	req *ReportTaskHeartbeatReq) (
	err error, resp *BaseResp) {

	reqBody, err := json.Marshal(req)
	if nil != err {
		return err, resp
	}

	resp = new(BaseResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientReportTaskHeartbeatInterface,
		http.MethodPut,
		u.header,
		reqBody,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

// ReportSizeChanged reports an object size/count change. Refreshes an illegal
// token and retries once (bounded; the old goto retried forever).
func (u *UrchinClient) ReportSizeChanged(
	ctx context.Context,
	req *ReportSizeChangedReq) (
	err error, resp *BaseResp) {

	reqBody, err := json.Marshal(req)
	if nil != err {
		return err, resp
	}

	resp = new(BaseResp)
	retry := 0
sendjob:
	err, respBody := Do(
		ctx,
		u.addr+UrchinClientReportSizeChangedInterface,
		http.MethodPost,
		u.header,
		reqBody,
		u.urchinClient)
	if nil != err {
		return err, resp
	}

	err = json.Unmarshal(respBody, resp)
	if nil != err {
		return err, resp
	}

	if SuccessCode != resp.Code {
		// Refresh the token and retry at most once.
		if resp.Code == errorIllegalToken && retry < 1 {
			retry++
			_ = u.GetToken()
			goto sendjob
		}
		return nil, resp
	}

	return nil, resp
}

+ 12
- 0
modules/urchin_v2/common/error.go View File

@@ -0,0 +1,12 @@
package common

import (
"errors"
)

// Response codes shared by urchin v2 API payloads.
const (
	SuccessCode = 0 // operation succeeded
	ErrorSystem = 1 // generic server-side failure
)

// ErrAbort is a sentinel error for deliberately aborted operations.
// NOTE(review): usage is name-based; confirm semantics at call sites.
var ErrAbort = errors.New("AbortError")

+ 116
- 0
modules/urchin_v2/common/http.go View File

@@ -0,0 +1,116 @@
package common

import (
"context"
"encoding/json"
"io"
"net/http"
"strings"

"code.gitea.io/gitea/modules/log"
"github.com/hashicorp/go-retryablehttp"
)

// Post sends reqBody as JSON to url via the retrying client and returns the
// raw response body.
//
// NOTE(review): ctx is accepted but unused — retryablehttp's Post convenience
// helper takes no context; use Do for cancellable requests.
func Post(
	ctx context.Context,
	url string,
	reqBody []byte,
	client *retryablehttp.Client) (err error, respBody []byte) {

	respBuf, err := client.Post(
		url,
		"application/json; charset=UTF-8",
		strings.NewReader(string(reqBody)))
	if nil != err {
		return err, respBody
	}
	// Close errors on a fully-read response body carry no useful signal;
	// the previous empty `if err != nil {}` block did nothing anyway.
	defer respBuf.Body.Close()

	respBody, err = io.ReadAll(respBuf.Body)
	if nil != err {
		return err, respBody
	}

	return nil, respBody
}

// Get performs a GET via the retrying client and returns the raw response
// body.
//
// NOTE(review): ctx is accepted but unused — retryablehttp's Get convenience
// helper takes no context; use Do for cancellable requests.
func Get(
	ctx context.Context,
	url string,
	client *retryablehttp.Client) (err error, respBody []byte) {

	respBuf, err := client.Get(url)
	if nil != err {
		return err, respBody
	}
	// Close errors on a fully-read response body carry no useful signal;
	// the previous empty `if err != nil {}` block did nothing anyway.
	defer respBuf.Body.Close()

	respBody, err = io.ReadAll(respBuf.Body)
	if nil != err {
		return err, respBody
	}

	return nil, respBody
}

// Do performs one urchin HTTP request with the given method, headers and body
// (reqBody may be []byte, a reader, or nil), logging request and response,
// and returns the raw response body in the package's (err, respBody) order.
func Do(
	ctx context.Context,
	url,
	method string,
	header http.Header,
	reqBody interface{},
	client *retryablehttp.Client) (err error, respBody []byte) {

	request, err := retryablehttp.NewRequest(
		method,
		url,
		reqBody)
	if nil != err {
		log.Error("urchin_v2 request(%s %s) NewRequest error. err=%v", method, url, err)
		return err, respBody
	}
	// Attach the caller's context so cancellation and deadlines actually
	// apply — previously ctx was accepted but never used.
	request = request.WithContext(ctx)

	request.Header = header
	log.Info("urchin_v2 send request(%s %s) request body=%s", method, url, jsonString(reqBody))

	resp, err := client.Do(request)
	if nil != err {
		log.Error("urchin_v2 request(%s %s) response error. err=%v", method, url, err)
		return err, respBody
	}
	// Close errors on a fully-read response body carry no useful signal.
	defer resp.Body.Close()

	respBody, err = io.ReadAll(resp.Body)
	if nil != err {
		log.Error("urchin_v2 request(%s %s) io ReadAll error. err=%v", method, url, err)
		return err, respBody
	}
	log.Info("urchin_v2 request(%s %s) response body=%s", method, url, jsonString(respBody))

	return nil, respBody
}

// jsonString renders v for log output: []byte and string pass through
// verbatim, nil becomes the empty string, and anything else is JSON-encoded
// on a best-effort basis (marshal errors yield "").
func jsonString(v interface{}) string {
	switch val := v.(type) {
	case nil:
		return ""
	case []byte:
		return string(val)
	case string:
		return val
	default:
		encoded, _ := json.Marshal(val)
		return string(encoded)
	}
}

+ 1190
- 0
modules/urchin_v2/module/urchin.go View File

@@ -0,0 +1,1190 @@
package module

import (
"encoding/xml"
"fmt"
"time"
)

const (
	// Client defaults: request timeout (seconds) and max idle connections per host.
	DefaultUClientReqTimeout    = 10
	DefaultUClientMaxConnection = 500

	// HTTP header names used on every urchin request.
	UrchinClientHeaderUserId = "X-User-Id"
	UrchinClientHeaderToken  = "X-Token"
	// NOTE(review): name is missing "Re" (should be ...HeaderRequestId), but
	// it is exported and used elsewhere, so it is kept as-is.
	UrchinClientHeaderquestId = "X-Request-Id"

	// Pre-signed URL endpoints (task-bound and task-less variants).
	UrchinClientCreateInitiateMultipartUploadSignedUrlInterface            = "/v1/object/auth/create_init_multi_part_upload_signed_url"
	UrchinClientCreateUploadPartSignedUrlInterface                         = "/v1/object/auth/create_upload_part_signed_url"
	UrchinClientCreateListPartsSignedUrlInterface                          = "/v1/object/auth/create_list_parts_signed_url"
	UrchinClientCreateCompleteMultipartUploadSignedUrlInterface            = "/v1/object/auth/create_complete_multi_part_upload_signed_url"
	UrchinClientCreateInitiateMultipartUploadSignedUrlInterfaceWithoutTask = "/v1/object/auth/create_init_multi_part_upload_signed_url_without_task"
	UrchinClientCreateUploadPartSignedUrlInterfaceWithoutTask              = "/v1/object/auth/create_upload_part_signed_url_without_task"
	UrchinClientCreateListPartsSignedUrlInterfaceWithoutTask               = "/v1/object/auth/create_list_parts_signed_url_without_task"
	UrchinClientCreateCompleteMultipartUploadSignedUrlInterfaceWithoutTask = "/v1/object/auth/create_complete_multi_part_upload_signed_url_without_task"
	UrchinClientCreateAbortMultipartUploadSignedUrlInterface               = "/v1/object/auth/create_abort_multi_part_upload_signed_url"
	UrchinClientCreatePutObjectSignedUrlInterface                          = "/v1/object/auth/create_put_object_signed_url"
	UrchinClientCreatePutObjectSignedUrlInterfaceWithoutTask               = "/v1/object/auth/create_put_object_signed_url_without_task"
	UrchinClientCreateGetObjectMetadataSignedUrlInterface                  = "/v1/object/auth/create_get_object_metadata_signed_url"
	UrchinClientCreateGetObjectMetadataSignedUrlInterfaceWithoutTask       = "/v1/object/auth/create_get_object_metadata_signed_url_without_task"
	UrchinClientCreateGetObjectSignedUrlInterface                          = "/v1/object/auth/create_get_object_signed_url"
	UrchinClientCreateGetObjectSignedUrlInterfaceWithoutTask               = "/v1/object/auth/create_get_object_signed_url_without_task"
	UrchinClientCreateListObjectsSignedUrlInterface                        = "/v1/object/auth/create_list_objects_signed_url"
	UrchinClientCreateListObjectsSignedUrlInterfaceWithoutTask             = "/v1/object/auth/create_list_objects_signed_url_without_task"
	UrchinClientGetIpfsTokenInterface                                      = "/v1/object/auth/get_ipfs_token"
	UrchinClientCreateJCSPreSignedObjectListInterface                      = "/v1/object/auth/create_jcs_pre_signed_list"
	UrchinClientCreateJCSPreSignedObjectUploadInterface                    = "/v1/object/auth/create_jcs_pre_signed_object_upload"
	UrchinClientCreateJCSPreSignedObjectNewMultipartUploadInterface        = "/v1/object/auth/create_jcs_pre_signed_new_multi_part_upload"
	UrchinClientCreateJCSPreSignedObjectUploadPartInterface                = "/v1/object/auth/create_jcs_pre_signed_upload_part"
	UrchinClientCreateJCSPreSignedObjectCompleteMultipartUploadInterface   = "/v1/object/auth/create_jcs_pre_signed_complete_multi_part_upload"
	UrchinClientCreateJCSPreSignedObjectDownloadInterface                  = "/v1/object/auth/create_jcs_pre_signed_download"

	// Object management endpoints.
	UrchinClientCreateObjectInterface           = "/v1/object/create"
	UrchinClientUploadObjectInterface           = "/v1/object/upload"
	UrchinClientDownloadObjectInterface         = "/v1/object/download"
	UrchinClientLoadObjectInterface             = "/v1/object/load"
	UrchinClientMigrateObjectInterface          = "/v1/object/migrate"
	UrchinClientCopyObjectInterface             = "/v1/object/copy"
	UrchinClientGetObjectInterface              = "/v1/object"
	UrchinClientDeleteObjectInterface           = "/v1/object"
	UrchinClientPutObjectDeploymentInterface    = "/v1/object/deployment"
	UrchinClientDeleteObjectDeploymentInterface = "/v1/object/deployment"
	UrchinClientListObjectsInterface            = "/v1/object/list"
	UrchinClientGetObjectMetadataInterface      = "/v1/object/metadata"
	UrchinClientListPartsInterface              = "/v1/object/part/list"

	// File-level endpoints.
	UrchinClientUploadFileInterface        = "/v1/object/file/upload"
	UrchinClientDownloadFileInterface      = "/v1/object/file/download"
	UrchinClientDeleteFileInterface        = "/v1/object/file"
	UrchinClientReportSizeChangedInterface = "/v1/object/count"

	// Task endpoints.
	UrchinClientGetTaskInterface             = "/v1/task"
	UrchinClientFinishTaskInterface          = "/v1/task/finish"
	UrchinClientRetryTaskInterface           = "/v1/task/retry"
	UrchinClientReportTaskHeartbeatInterface = "/v1/task/report_heartbeat"

	// Storage backend categories.
	StorageCategoryEIpfs      = 1
	StorageCategoryEObs       = 2
	StorageCategoryEMinio     = 3
	StorageCategoryEJcs       = 4
	StorageCategoryEEos       = 5
	StorageCategoryEStarLight = 6
	StorageCategoryEParaCloud = 7
	StorageCategoryEScow      = 8
	StorageCategoryESugon     = 9

	// Task types.
	TaskTypeUpload       = 1
	TaskTypeDownload     = 2
	TaskTypeMigrate      = 3
	TaskTypeCopy         = 4
	TaskTypeUploadFile   = 5
	TaskTypeDownloadFile = 6
	TaskTypeLoad         = 7

	// Task finish results.
	TaskFResultESuccess = 1
	TaskFResultEFailed  = 2

	// Server response codes.
	UrchinSuccessCode       = 0
	ObjectStatusInvalidCode = 10350 // object is busy / in use
)

// TaskTypeOnlyServiceRetry marks the task types whose retries may only be
// driven by the service side: migrate and copy tasks. Membership is tested
// with a plain map lookup (absent keys read as false).
var TaskTypeOnlyServiceRetry = map[int32]bool{
	TaskTypeMigrate: true,
	TaskTypeCopy:    true,
}

// UrchinError is implemented by Urchin response types that carry a result
// code: IsSuccess reports whether the call succeeded, and ToError converts
// a failure response into a Go error.
type UrchinError interface {
	ToError() error
	IsSuccess() bool
}

// ErrObjectStatusInvalid is the typed error produced when the Urchin service
// answers with ObjectStatusInvalidCode (see the ToError implementations in
// this file), i.e. the target object is in a state that forbids the operation.
type ErrObjectStatusInvalid struct {
}

// IsErrObjectStatusInvalid reports whether err is an ErrObjectStatusInvalid
// value. Note: this is a direct type assertion and does not unwrap errors
// wrapped with %w.
func IsErrObjectStatusInvalid(err error) bool {
	_, ok := err.(ErrObjectStatusInvalid)
	return ok
}

// Error implements the error interface. The message "dataset.resource_in_use"
// looks like an i18n/message key consumed by callers rather than a sentence —
// confirm before changing it.
func (err ErrObjectStatusInvalid) Error() string {
	// Was fmt.Sprintf with no arguments (flagged by go vet / staticcheck
	// S1039); returning the constant string directly is equivalent.
	return "dataset.resource_in_use"
}

// BaseResp is the common envelope returned by Urchin endpoints: a numeric
// result code, a human-readable message, and the server-side request id for
// tracing. The @inject_tag comments are directives for protoc-go-inject-tag
// and must be kept alongside the fields.
type BaseResp struct {
	// @inject_tag: json:"code"
	Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code"`
	// @inject_tag: json:"message"
	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message"`
	// @inject_tag: json:"request_id"
	RequestId string `protobuf:"bytes,3,opt,name=request_id,proto3" json:"request_id"`
}

// ToError maps the response code to a Go error. ObjectStatusInvalidCode is
// surfaced as the typed ErrObjectStatusInvalid; any other code is wrapped in
// a generic formatted error. This method always returns a non-nil error, so
// callers should consult IsSuccess first.
func (r *BaseResp) ToError() error {
	switch r.Code {
	case ObjectStatusInvalidCode:
		return ErrObjectStatusInvalid{}
	default:
		return fmt.Errorf("urchin operate error.[%d]%s", r.Code, r.Message)
	}
}

// IsSuccess reports whether the call completed with UrchinSuccessCode.
func (r *BaseResp) IsSuccess() bool {
	return r.Code == UrchinSuccessCode
}

// CreateInitiateMultipartUploadSignedUrlReq requests a pre-signed URL to
// initiate a multipart upload, scoped to an existing task.
type CreateInitiateMultipartUploadSignedUrlReq struct {
	// @inject_tag: json:"task_id"
	TaskId int32 `protobuf:"varint,1,opt,name=task_id,proto3" json:"task_id"`
	// @inject_tag: json:"source"
	Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source"`
}

// CreateInitiateMultipartUploadSignedUrlReqWithoutTask is the task-less
// variant of CreateInitiateMultipartUploadSignedUrlReq: the object is
// addressed by its UUID instead of a task id.
type CreateInitiateMultipartUploadSignedUrlReqWithoutTask struct {
	// @inject_tag: json:"obj_uuid"
	ObjUuid string `protobuf:"bytes,4,opt,name=obj_uuid,proto3" json:"obj_uuid"`
	// @inject_tag: json:"source"
	Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source"`
}

// CreateUploadPartSignedUrlReq requests a pre-signed URL for uploading one
// part (PartNumber) of an in-progress multipart upload (UploadId).
type CreateUploadPartSignedUrlReq struct {
	// @inject_tag: json:"upload_id"
	UploadId string `protobuf:"bytes,1,opt,name=upload_id,proto3" json:"upload_id"`
	// @inject_tag: json:"part_number"
	PartNumber int32 `protobuf:"varint,2,opt,name=part_number,proto3" json:"part_number"`
	// @inject_tag: json:"task_id"
	TaskId int32 `protobuf:"varint,3,opt,name=task_id,proto3" json:"task_id"`
	// @inject_tag: json:"source"
	Source string `protobuf:"bytes,4,opt,name=source,proto3" json:"source"`
}

// CreateUploadPartSignedUrlReqWithoutTask is the task-less variant of
// CreateUploadPartSignedUrlReq, addressing the object by UUID.
type CreateUploadPartSignedUrlReqWithoutTask struct {
	// @inject_tag: json:"upload_id"
	UploadId string `protobuf:"bytes,1,opt,name=upload_id,proto3" json:"upload_id"`
	// @inject_tag: json:"part_number"
	PartNumber int32 `protobuf:"varint,2,opt,name=part_number,proto3" json:"part_number"`
	// @inject_tag: json:"obj_uuid"
	ObjUuid string `protobuf:"bytes,4,opt,name=obj_uuid,proto3" json:"obj_uuid"`
	// NOTE(review): protobuf field number 4 is used by both ObjUuid and
	// Source here — duplicate field numbers are invalid on the proto wire.
	// Harmless if these structs are only JSON-marshaled, but confirm and fix
	// the tag if protobuf encoding is ever used.
	// @inject_tag: json:"source"
	Source string `protobuf:"bytes,4,opt,name=source,proto3" json:"source"`
}

// CreateListPartsSignedUrlReq requests a pre-signed URL to list the uploaded
// parts of a multipart upload; MaxParts / PartNumberMarker page the listing.
type CreateListPartsSignedUrlReq struct {
	// @inject_tag: json:"upload_id"
	UploadId string `protobuf:"bytes,1,opt,name=upload_id,proto3" json:"upload_id"`
	// @inject_tag: json:"task_id"
	TaskId int32 `protobuf:"varint,2,opt,name=task_id,proto3" json:"task_id"`
	// @inject_tag: json:"source"
	Source string `protobuf:"bytes,3,opt,name=source,proto3" json:"source"`
	// @inject_tag: json:"max_parts"
	MaxParts *int32 `protobuf:"varint,4,opt,name=max_parts,proto3,oneof" json:"max_parts"`
	// @inject_tag: json:"part_number_marker"
	PartNumberMarker *int32 `protobuf:"varint,5,opt,name=part_number_marker,proto3,oneof" json:"part_number_marker"`
}

// CreateListPartsSignedUrlReqWithoutTask is the task-less variant of
// CreateListPartsSignedUrlReq, addressing the object by UUID.
type CreateListPartsSignedUrlReqWithoutTask struct {
	// @inject_tag: json:"upload_id"
	UploadId string `protobuf:"bytes,1,opt,name=upload_id,proto3" json:"upload_id"`
	// NOTE(review): protobuf field number 4 is used by both ObjUuid and
	// MaxParts in this struct — duplicate numbers are invalid on the proto
	// wire. Harmless for JSON-only use, but confirm and renumber if protobuf
	// encoding is ever used.
	// @inject_tag: json:"obj_uuid"
	ObjUuid string `protobuf:"bytes,4,opt,name=obj_uuid,proto3" json:"obj_uuid"`
	// @inject_tag: json:"source"
	Source string `protobuf:"bytes,3,opt,name=source,proto3" json:"source"`
	// @inject_tag: json:"max_parts"
	MaxParts *int32 `protobuf:"varint,4,opt,name=max_parts,proto3,oneof" json:"max_parts"`
	// @inject_tag: json:"part_number_marker"
	PartNumberMarker *int32 `protobuf:"varint,5,opt,name=part_number_marker,proto3,oneof" json:"part_number_marker"`
}

// CreateCompleteMultipartUploadSignedUrlReq requests a pre-signed URL to
// complete (finalize) a multipart upload, scoped to a task.
type CreateCompleteMultipartUploadSignedUrlReq struct {
	// @inject_tag: json:"upload_id"
	UploadId string `protobuf:"bytes,1,opt,name=upload_id,proto3" json:"upload_id"`
	// @inject_tag: json:"task_id"
	TaskId int32 `protobuf:"varint,2,opt,name=task_id,proto3" json:"task_id"`
	// @inject_tag: json:"source"
	Source string `protobuf:"bytes,3,opt,name=source,proto3" json:"source"`
}

// CreateCompleteMultipartUploadSignedUrlReqWithoutTask is the task-less
// variant of CreateCompleteMultipartUploadSignedUrlReq.
type CreateCompleteMultipartUploadSignedUrlReqWithoutTask struct {
	// @inject_tag: json:"upload_id"
	UploadId string `protobuf:"bytes,1,opt,name=upload_id,proto3" json:"upload_id"`
	// @inject_tag: json:"obj_uuid"
	ObjUuid string `protobuf:"bytes,4,opt,name=obj_uuid,proto3" json:"obj_uuid"`
	// @inject_tag: json:"source"
	Source string `protobuf:"bytes,3,opt,name=source,proto3" json:"source"`
}

// CreateAbortMultipartUploadSignedUrlReq requests a pre-signed URL to abort
// an in-progress multipart upload. No task-less variant exists in this file.
type CreateAbortMultipartUploadSignedUrlReq struct {
	// @inject_tag: json:"upload_id"
	UploadId string `protobuf:"bytes,1,opt,name=upload_id,proto3" json:"upload_id"`
	// @inject_tag: json:"task_id"
	TaskId int32 `protobuf:"varint,2,opt,name=task_id,proto3" json:"task_id"`
	// @inject_tag: json:"source"
	Source string `protobuf:"bytes,3,opt,name=source,proto3" json:"source"`
}

// CreatePutObjectSignedUrlReq requests a pre-signed URL for a single-shot
// object PUT, scoped to a task.
type CreatePutObjectSignedUrlReq struct {
	// @inject_tag: json:"task_id"
	TaskId int32 `protobuf:"varint,1,opt,name=task_id,proto3" json:"task_id"`
	// @inject_tag: json:"source"
	Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source"`
}

// CreatePutObjectSignedUrlReqWithoutTask is the task-less variant of
// CreatePutObjectSignedUrlReq, addressing the object by UUID.
type CreatePutObjectSignedUrlReqWithoutTask struct {
	// @inject_tag: json:"obj_uuid"
	ObjUuid string `protobuf:"bytes,4,opt,name=obj_uuid,proto3" json:"obj_uuid"`
	// @inject_tag: json:"source"
	Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source"`
}

// CreateGetObjectMetadataSignedUrlReq requests a pre-signed URL for reading
// object metadata, scoped to a task.
type CreateGetObjectMetadataSignedUrlReq struct {
	// @inject_tag: json:"task_id"
	TaskId int32 `protobuf:"varint,1,opt,name=task_id,proto3" json:"task_id"`
	// @inject_tag: json:"source"
	Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source"`
}

// CreateGetObjectMetadataSignedUrlReqWithoutTask is the task-less variant of
// CreateGetObjectMetadataSignedUrlReq.
type CreateGetObjectMetadataSignedUrlReqWithoutTask struct {
	// @inject_tag: json:"obj_uuid"
	ObjUuid string `protobuf:"bytes,1,opt,name=obj_uuid,proto3" json:"obj_uuid"`
	// @inject_tag: json:"source"
	Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source"`
}

// CreateGetObjectSignedUrlReq requests a pre-signed download URL; the
// optional RangeStart/RangeEnd select a byte range of the object.
type CreateGetObjectSignedUrlReq struct {
	// @inject_tag: json:"range_start"
	RangeStart *int64 `protobuf:"varint,1,opt,name=range_start,proto3,oneof" json:"range_start"`
	// @inject_tag: json:"range_end"
	RangeEnd *int64 `protobuf:"varint,2,opt,name=range_end,proto3,oneof" json:"range_end"`
	// @inject_tag: json:"task_id"
	TaskId int32 `protobuf:"varint,3,opt,name=task_id,proto3" json:"task_id"`
	// @inject_tag: json:"source"
	Source string `protobuf:"bytes,4,opt,name=source,proto3" json:"source"`
}

// CreateGetObjectSignedUrlReqWithoutTask is the task-less variant of
// CreateGetObjectSignedUrlReq, addressing the object by UUID.
type CreateGetObjectSignedUrlReqWithoutTask struct {
	// @inject_tag: json:"range_start"
	RangeStart *int64 `protobuf:"varint,1,opt,name=range_start,proto3,oneof" json:"range_start"`
	// @inject_tag: json:"range_end"
	RangeEnd *int64 `protobuf:"varint,2,opt,name=range_end,proto3,oneof" json:"range_end"`
	// @inject_tag: json:"obj_uuid"
	ObjUuid string `protobuf:"bytes,3,opt,name=obj_uuid,proto3" json:"obj_uuid"`
	// @inject_tag: json:"source"
	Source string `protobuf:"bytes,4,opt,name=source,proto3" json:"source"`
}

// CreateListObjectsSignedUrlReq requests a pre-signed URL for an object
// listing; the optional Marker continues a previous listing.
type CreateListObjectsSignedUrlReq struct {
	// @inject_tag: json:"task_id"
	TaskId int32 `protobuf:"varint,1,opt,name=task_id,proto3" json:"task_id"`
	// @inject_tag: json:"marker"
	Marker *string `protobuf:"bytes,2,opt,name=marker,proto3,oneof" json:"marker"`
}

// CreateListObjectsSignedUrlReqWithoutTask is the task-less variant of
// CreateListObjectsSignedUrlReq, with an extra optional Prefix filter.
type CreateListObjectsSignedUrlReqWithoutTask struct {
	// @inject_tag: json:"obj_uuid"
	ObjUuid string `protobuf:"bytes,1,opt,name=obj_uuid,proto3" json:"obj_uuid"`
	// @inject_tag: json:"marker"
	Marker *string `protobuf:"bytes,2,opt,name=marker,proto3,oneof" json:"marker"`
	// NOTE(review): the protobuf tag says name=source but the field and the
	// json tag say prefix — looks like a copy/paste slip in the proto name.
	// Only the json tag matters for JSON encoding; confirm before fixing.
	// @inject_tag: json:"prefix"
	Prefix *string `protobuf:"bytes,3,opt,name=source,proto3" json:"prefix"`
}

// CreateSignedUrlResp is the common response for all signed-URL creation
// requests: the standard code/message/request_id envelope plus the signed
// URL itself and the HTTP headers the caller must send with it.
type CreateSignedUrlResp struct {
	// @inject_tag: json:"code"
	Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code"`
	// @inject_tag: json:"message"
	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message"`
	// @inject_tag: json:"request_id"
	RequestId string `protobuf:"bytes,3,opt,name=request_id,proto3" json:"request_id"`
	// @inject_tag: json:"signed_url"
	SignedUrl string `protobuf:"bytes,4,opt,name=signed_url,proto3" json:"signed_url"`
	// @inject_tag: json:"header"
	Header map[string]*HeaderValues `protobuf:"bytes,5,rep,name=header,proto3" json:"header" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}

// ToError maps the response code to a Go error; ObjectStatusInvalidCode
// becomes the typed ErrObjectStatusInvalid, anything else a generic formatted
// error. Always returns non-nil — check IsSuccess first.
func (r *CreateSignedUrlResp) ToError() error {
	switch r.Code {
	case ObjectStatusInvalidCode:
		return ErrObjectStatusInvalid{}
	default:
		return fmt.Errorf("urchin operate error.[%d]%s", r.Code, r.Message)
	}
}

// IsSuccess reports whether the call completed with UrchinSuccessCode.
func (r *CreateSignedUrlResp) IsSuccess() bool {
	return r.Code == UrchinSuccessCode
}

// HeaderValues holds the list of values for one HTTP header name in
// CreateSignedUrlResp.Header (a header may repeat).
type HeaderValues struct {
	// @inject_tag: json:"values"
	Values []string `protobuf:"bytes,1,rep,name=values,proto3" json:"values"`
}

// GetIpfsTokenReq asks for an IPFS access token for the named node. The url
// tag indicates the field is also sent as a query parameter.
type GetIpfsTokenReq struct {
	// @inject_tag: json:"node_name"
	NodeName string `protobuf:"bytes,1,opt,name=node_name,proto3" json:"node_name" url:"node_name"`
}

// GetIpfsTokenResp carries the IPFS endpoint URL and access token in
// addition to the standard code/message/request_id envelope.
type GetIpfsTokenResp struct {
	// @inject_tag: json:"code"
	Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code"`
	// @inject_tag: json:"message"
	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message"`
	// @inject_tag: json:"request_id"
	RequestId string `protobuf:"bytes,3,opt,name=request_id,proto3" json:"request_id"`
	// @inject_tag: json:"url"
	Url string `protobuf:"bytes,4,opt,name=url,proto3" json:"url"`
	// @inject_tag: json:"token"
	Token string `protobuf:"bytes,5,opt,name=token,proto3" json:"token"`
}

// CreateJCSPreSignedObjectListReq requests a JCS pre-signed listing URL; the
// optional ContinuationToken resumes a previous listing.
type CreateJCSPreSignedObjectListReq struct {
	// @inject_tag: json:"task_id"
	TaskId int32 `protobuf:"varint,1,opt,name=task_id,proto3" json:"task_id"`
	// @inject_tag: json:"continuation_token"
	ContinuationToken *string `protobuf:"bytes,2,opt,name=continuation_token,proto3,oneof" json:"continuation_token"`
}

// CreateJCSPreSignedObjectUploadReq requests a JCS pre-signed URL for a
// single-shot object upload within a task.
type CreateJCSPreSignedObjectUploadReq struct {
	// @inject_tag: json:"task_id"
	TaskId int32 `protobuf:"varint,1,opt,name=task_id,proto3" json:"task_id"`
	// @inject_tag: json:"source"
	Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source"`
}

// CreateJCSPreSignedObjectNewMultipartUploadReq requests a JCS pre-signed
// URL that starts a new multipart upload within a task.
type CreateJCSPreSignedObjectNewMultipartUploadReq struct {
	// @inject_tag: json:"task_id"
	TaskId int32 `protobuf:"varint,1,opt,name=task_id,proto3" json:"task_id"`
	// @inject_tag: json:"source"
	Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source"`
}

// CreateJCSPreSignedObjectUploadPartReq requests a JCS pre-signed URL for
// uploading part Index of object ObjectId within a task.
type CreateJCSPreSignedObjectUploadPartReq struct {
	// @inject_tag: json:"object_id"
	ObjectId int32 `protobuf:"varint,1,opt,name=object_id,proto3" json:"object_id"`
	// @inject_tag: json:"index"
	Index int32 `protobuf:"varint,2,opt,name=index,proto3" json:"index"`
	// @inject_tag: json:"task_id"
	TaskId int32 `protobuf:"varint,3,opt,name=task_id,proto3" json:"task_id"`
}

// CreateJCSPreSignedObjectCompleteMultipartUploadReq requests a JCS
// pre-signed URL that completes a multipart upload; Indexes lists the part
// indexes to assemble.
type CreateJCSPreSignedObjectCompleteMultipartUploadReq struct {
	// @inject_tag: json:"object_id"
	ObjectId int32 `protobuf:"varint,1,opt,name=object_id,proto3" json:"object_id"`
	// @inject_tag: json:"indexes"
	Indexes []int32 `protobuf:"varint,2,rep,packed,name=indexes,proto3" json:"indexes"`
	// @inject_tag: json:"task_id"
	TaskId int32 `protobuf:"varint,3,opt,name=task_id,proto3" json:"task_id"`
}

// CreateJCSPreSignedObjectDownloadReq requests a JCS pre-signed download URL
// for a byte range (Offset/Length) of object ObjectId within a task.
type CreateJCSPreSignedObjectDownloadReq struct {
	// @inject_tag: json:"object_id"
	ObjectId int32 `protobuf:"varint,1,opt,name=object_id,proto3" json:"object_id"`
	// @inject_tag: json:"offset"
	Offset int64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset"`
	// @inject_tag: json:"length"
	Length int64 `protobuf:"varint,3,opt,name=length,proto3" json:"length"`
	// @inject_tag: json:"task_id"
	TaskId int32 `protobuf:"varint,4,opt,name=task_id,proto3" json:"task_id"`
}

// CreateObjectReq describes an object-create request; Desc and NodeName are
// optional (pointer fields).
type CreateObjectReq struct {
	// @inject_tag: json:"user_id"
	UserId string `protobuf:"bytes,1,opt,name=user_id,proto3" json:"user_id"`
	// @inject_tag: json:"name"
	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name"`
	// @inject_tag: json:"desc"
	Desc *string `protobuf:"bytes,3,opt,name=desc,proto3,oneof" json:"desc"`
	// @inject_tag: json:"node_name"
	NodeName *string `protobuf:"bytes,4,opt,name=node_name,proto3,oneof" json:"node_name"`
}

// CreateObjectResp returns the newly created object's UUID and its
// deployment info alongside the standard envelope fields.
type CreateObjectResp struct {
	// @inject_tag: json:"code"
	Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code"`
	// @inject_tag: json:"message"
	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message"`
	// @inject_tag: json:"request_id"
	RequestId string `protobuf:"bytes,3,opt,name=request_id,proto3" json:"request_id"`
	// @inject_tag: json:"obj_uuid"
	ObjUuid string `protobuf:"bytes,4,opt,name=obj_uuid,proto3" json:"obj_uuid"`
	// @inject_tag: json:"deployment_info"
	DeploymentInfo *ObjDeploymentData `protobuf:"bytes,5,opt,name=deployment_info,proto3" json:"deployment_info"`
}

// ToError maps the response code to a Go error; ObjectStatusInvalidCode
// becomes the typed ErrObjectStatusInvalid, anything else a generic formatted
// error. Always returns non-nil — check IsSuccess first.
func (r *CreateObjectResp) ToError() error {
	switch r.Code {
	case ObjectStatusInvalidCode:
		return ErrObjectStatusInvalid{}
	default:
		return fmt.Errorf("urchin operate error.[%d]%s", r.Code, r.Message)
	}
}

// IsSuccess reports whether the call completed with UrchinSuccessCode.
func (r *CreateObjectResp) IsSuccess() bool {
	return r.Code == UrchinSuccessCode
}

// UploadObjectReq describes an object-upload request: the local path to read
// from plus optional description and target node name.
type UploadObjectReq struct {
	// @inject_tag: json:"user_id"
	UserId string `protobuf:"bytes,1,opt,name=user_id,proto3" json:"user_id"`
	// @inject_tag: json:"name"
	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name"`
	// @inject_tag: json:"source_local_path"
	SourceLocalPath string `protobuf:"bytes,3,opt,name=source_local_path,proto3" json:"source_local_path"`
	// @inject_tag: json:"desc"
	Desc *string `protobuf:"bytes,4,opt,name=desc,proto3,oneof" json:"desc"`
	// @inject_tag: json:"node_name"
	NodeName *string `protobuf:"bytes,5,opt,name=node_name,proto3,oneof" json:"node_name"`
}

// UploadObjectTaskParams bundles the original UploadObjectReq with the node
// placement details resolved for the upload task.
type UploadObjectTaskParams struct {
	// @inject_tag: json:"request"
	Request *UploadObjectReq `protobuf:"bytes,1,opt,name=request,proto3" json:"request"`
	// @inject_tag: json:"uuid"
	Uuid string `protobuf:"bytes,2,opt,name=uuid,proto3" json:"uuid"`
	// @inject_tag: json:"node_id"
	NodeId int32 `protobuf:"varint,3,opt,name=node_id,proto3" json:"node_id"`
	// @inject_tag: json:"node_name"
	NodeName string `protobuf:"bytes,4,opt,name=node_name,proto3" json:"node_name"`
	// @inject_tag: json:"node_type"
	NodeType int32 `protobuf:"varint,5,opt,name=node_type,proto3" json:"node_type"`
	// @inject_tag: json:"location"
	Location string `protobuf:"bytes,6,opt,name=location,proto3" json:"location"`
	// @inject_tag: json:"request_id"
	RequestId string `protobuf:"bytes,7,opt,name=request_id,proto3" json:"request_id"`
}

// UploadObjectResp returns the id of the created upload task, the resolved
// node, and the new object's UUID alongside the standard envelope fields.
type UploadObjectResp struct {
	// @inject_tag: json:"code"
	Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code"`
	// @inject_tag: json:"message"
	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message"`
	// @inject_tag: json:"request_id"
	RequestId string `protobuf:"bytes,3,opt,name=request_id,proto3" json:"request_id"`
	// @inject_tag: json:"task_id"
	TaskId int32 `protobuf:"varint,4,opt,name=task_id,proto3" json:"task_id"`
	// @inject_tag: json:"node_type"
	NodeType int32 `protobuf:"varint,5,opt,name=node_type,proto3" json:"node_type"`
	// @inject_tag: json:"node_name"
	NodeName string `protobuf:"bytes,6,opt,name=node_name,proto3" json:"node_name"`
	// @inject_tag: json:"obj_uuid"
	ObjUuid string `protobuf:"bytes,7,opt,name=obj_uuid,proto3" json:"obj_uuid"`
}

// ToError maps the response code to a Go error; ObjectStatusInvalidCode
// becomes the typed ErrObjectStatusInvalid, anything else a generic formatted
// error. Always returns non-nil — check IsSuccess first.
func (r *UploadObjectResp) ToError() error {
	switch r.Code {
	case ObjectStatusInvalidCode:
		return ErrObjectStatusInvalid{}
	default:
		return fmt.Errorf("urchin operate error.[%d]%s", r.Code, r.Message)
	}
}

// IsSuccess reports whether the call completed with UrchinSuccessCode.
func (r *UploadObjectResp) IsSuccess() bool {
	return r.Code == UrchinSuccessCode
}

// UploadFileReq describes uploading a single file into an existing object
// (identified by ObjUuid). Size is optional.
// NOTE(review): Size is *int32, which caps the reportable size at ~2 GiB —
// confirm whether larger files are possible and an int64 is needed.
type UploadFileReq struct {
	// @inject_tag: json:"user_id"
	UserId string `protobuf:"bytes,1,opt,name=user_id,proto3" json:"user_id"`
	// @inject_tag: json:"obj_uuid"
	ObjUuid string `protobuf:"bytes,2,opt,name=obj_uuid,proto3" json:"obj_uuid"`
	// @inject_tag: json:"source"
	Source string `protobuf:"bytes,3,opt,name=source,proto3" json:"source"`
	// @inject_tag: json:"source_local_path"
	SourceLocalPath string `protobuf:"bytes,4,opt,name=source_local_path,proto3" json:"source_local_path"`
	// @inject_tag: json:"size"
	Size *int32 `protobuf:"varint,5,opt,name=size,proto3,oneof" json:"size"`
}

// UploadFileTaskParams bundles the original UploadFileReq with the node
// placement details resolved for the file-upload task.
type UploadFileTaskParams struct {
	// @inject_tag: json:"request"
	Request *UploadFileReq `protobuf:"bytes,1,opt,name=request,proto3" json:"request"`
	// @inject_tag: json:"node_id"
	NodeId int32 `protobuf:"varint,2,opt,name=node_id,proto3" json:"node_id"`
	// @inject_tag: json:"node_name"
	NodeName string `protobuf:"bytes,3,opt,name=node_name,proto3" json:"node_name"`
	// @inject_tag: json:"node_type"
	NodeType int32 `protobuf:"varint,4,opt,name=node_type,proto3" json:"node_type"`
	// @inject_tag: json:"location"
	Location string `protobuf:"bytes,5,opt,name=location,proto3" json:"location"`
	// @inject_tag: json:"request_id"
	RequestId string `protobuf:"bytes,6,opt,name=request_id,proto3" json:"request_id"`
}

// UploadFileResp returns the id of the created file-upload task and the
// resolved node alongside the standard envelope fields.
type UploadFileResp struct {
	// @inject_tag: json:"code"
	Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code"`
	// @inject_tag: json:"message"
	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message"`
	// @inject_tag: json:"request_id"
	RequestId string `protobuf:"bytes,3,opt,name=request_id,proto3" json:"request_id"`
	// @inject_tag: json:"task_id"
	TaskId int32 `protobuf:"varint,4,opt,name=task_id,proto3" json:"task_id"`
	// @inject_tag: json:"node_type"
	NodeType int32 `protobuf:"varint,5,opt,name=node_type,proto3" json:"node_type"`
	// @inject_tag: json:"node_name"
	NodeName string `protobuf:"bytes,6,opt,name=node_name,proto3" json:"node_name"`
}

// ToError maps the response code to a Go error; ObjectStatusInvalidCode
// becomes the typed ErrObjectStatusInvalid, anything else a generic formatted
// error. Always returns non-nil — check IsSuccess first.
func (r *UploadFileResp) ToError() error {
	switch r.Code {
	case ObjectStatusInvalidCode:
		return ErrObjectStatusInvalid{}
	default:
		return fmt.Errorf("urchin operate error.[%d]%s", r.Code, r.Message)
	}
}

// IsSuccess reports whether the call completed with UrchinSuccessCode.
func (r *UploadFileResp) IsSuccess() bool {
	return r.Code == UrchinSuccessCode
}

// DownloadFileReq describes downloading a single file from an object into a
// local path; NodeName optionally pins the source node.
type DownloadFileReq struct {
	// @inject_tag: json:"user_id"
	UserId string `protobuf:"bytes,1,opt,name=user_id,proto3" json:"user_id"`
	// @inject_tag: json:"obj_uuid"
	ObjUuid string `protobuf:"bytes,2,opt,name=obj_uuid,proto3" json:"obj_uuid"`
	// @inject_tag: json:"source"
	Source string `protobuf:"bytes,3,opt,name=source,proto3" json:"source"`
	// @inject_tag: json:"target_local_path"
	TargetLocalPath string `protobuf:"bytes,4,opt,name=target_local_path,proto3" json:"target_local_path"`
	// @inject_tag: json:"node_name"
	NodeName *string `protobuf:"bytes,5,opt,name=node_name,proto3,oneof" json:"node_name"`
}

// DownloadFileTaskParams bundles the original DownloadFileReq with the
// resolved source node and bucket details for the file-download task.
type DownloadFileTaskParams struct {
	// @inject_tag: json:"request"
	Request *DownloadFileReq `protobuf:"bytes,1,opt,name=request,proto3" json:"request"`
	// @inject_tag: json:"node_id"
	NodeId int32 `protobuf:"varint,2,opt,name=node_id,proto3" json:"node_id"`
	// @inject_tag: json:"node_name"
	NodeName string `protobuf:"bytes,3,opt,name=node_name,proto3" json:"node_name"`
	// @inject_tag: json:"node_type"
	NodeType int32 `protobuf:"varint,4,opt,name=node_type,proto3" json:"node_type"`
	// @inject_tag: json:"bucket_name"
	BucketName string `protobuf:"bytes,5,opt,name=bucket_name,proto3" json:"bucket_name"`
	// @inject_tag: json:"location"
	Location string `protobuf:"bytes,6,opt,name=location,proto3" json:"location"`
	// @inject_tag: json:"request_id"
	RequestId string `protobuf:"bytes,7,opt,name=request_id,proto3" json:"request_id"`
}

// DownloadFileResp returns the id of the created file-download task and the
// resolved node/bucket alongside the standard envelope fields.
// NOTE(review): unlike the sibling *Resp types, this one has no
// ToError/IsSuccess methods and so does not satisfy UrchinError — confirm
// whether that is intentional.
type DownloadFileResp struct {
	// @inject_tag: json:"code"
	Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code"`
	// @inject_tag: json:"message"
	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message"`
	// @inject_tag: json:"request_id"
	RequestId string `protobuf:"bytes,3,opt,name=request_id,proto3" json:"request_id"`
	// @inject_tag: json:"task_id"
	TaskId int32 `protobuf:"varint,4,opt,name=task_id,proto3" json:"task_id"`
	// @inject_tag: json:"node_type"
	NodeType int32 `protobuf:"varint,5,opt,name=node_type,proto3" json:"node_type"`
	// @inject_tag: json:"bucket_name"
	BucketName string `protobuf:"bytes,6,opt,name=bucket_name,proto3" json:"bucket_name"`
}

// GetObjectReq is a paged object query; the url tags indicate the fields are
// sent as query-string parameters. SortBy/OrderBy/ObjUuid are optional.
type GetObjectReq struct {
	// @inject_tag: json:"user_id"
	UserId string `protobuf:"bytes,1,opt,name=user_id,proto3" json:"user_id" url:"user_id"`
	// @inject_tag: json:"page_index"
	PageIndex int32 `protobuf:"varint,2,opt,name=page_index,proto3" json:"page_index" url:"page_index"`
	// @inject_tag: json:"page_size"
	PageSize int32 `protobuf:"varint,3,opt,name=page_size,proto3" json:"page_size" url:"page_size"`
	// @inject_tag: json:"sort_by"
	SortBy *string `protobuf:"bytes,4,opt,name=sort_by,proto3,oneof" json:"sort_by" url:"sort_by,omitempty"`
	// @inject_tag: json:"order_by"
	OrderBy *string `protobuf:"bytes,5,opt,name=order_by,proto3,oneof" json:"order_by" url:"order_by,omitempty"`
	// @inject_tag: json:"obj_uuid"
	ObjUuid *string `protobuf:"bytes,6,opt,name=obj_uuid,proto3,oneof" json:"obj_uuid" url:"obj_uuid,omitempty"`
}

// GetObjectResp wraps the standard envelope around a paged object listing.
type GetObjectResp struct {
	// @inject_tag: json:"code"
	Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code"`
	// @inject_tag: json:"message"
	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message"`
	// @inject_tag: json:"request_id"
	RequestId string `protobuf:"bytes,3,opt,name=request_id,proto3" json:"request_id"`
	// @inject_tag: json:"data"
	Data *GetObjectRespData `protobuf:"bytes,4,opt,name=data,proto3" json:"data"`
}

// GetObjectRespData is one page of object results: the overall total and the
// page's entries.
type GetObjectRespData struct {
	// @inject_tag: json:"total"
	Total int32 `protobuf:"varint,1,opt,name=total,proto3" json:"total"`
	// @inject_tag: json:"list"
	List []*ObjectInfo `protobuf:"bytes,2,rep,name=list,proto3" json:"list"`
}

// ObjectInfo pairs an object's metadata with its (possibly multiple)
// deployments across storage nodes.
type ObjectInfo struct {
	// @inject_tag: json:"meta_info"
	MetaInfo *DataObjData `protobuf:"bytes,1,opt,name=meta_info,proto3" json:"meta_info"`
	// @inject_tag: json:"deployment_info"
	DeploymentInfo []*ObjDeploymentData `protobuf:"bytes,2,rep,name=deployment_info,proto3" json:"deployment_info"`
}

// DataObjData is an object's metadata record: identity, ownership, status,
// version, and lifecycle timestamps (timestamps are strings here, format set
// by the server).
type DataObjData struct {
	// @inject_tag: json:"id"
	Id int32 `protobuf:"varint,1,opt,name=id,proto3" json:"id"`
	// @inject_tag: json:"uuid"
	Uuid string `protobuf:"bytes,2,opt,name=uuid,proto3" json:"uuid"`
	// @inject_tag: json:"name"
	Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name"`
	// @inject_tag: json:"desc"
	Desc string `protobuf:"bytes,4,opt,name=desc,proto3" json:"desc"`
	// @inject_tag: json:"status"
	Status int32 `protobuf:"varint,5,opt,name=status,proto3" json:"status"`
	// @inject_tag: json:"user_id"
	UserId string `protobuf:"bytes,6,opt,name=user_id,proto3" json:"user_id"`
	// @inject_tag: json:"version"
	Version int32 `protobuf:"varint,7,opt,name=version,proto3" json:"version"`
	// @inject_tag: json:"create_time"
	CreateTime string `protobuf:"bytes,8,opt,name=create_time,proto3" json:"create_time"`
	// @inject_tag: json:"update_time"
	UpdateTime string `protobuf:"bytes,9,opt,name=update_time,proto3" json:"update_time"`
	// @inject_tag: json:"delete_time"
	DeleteTime string `protobuf:"bytes,10,opt,name=delete_time,proto3" json:"delete_time"`
}

// ObjDeploymentData describes one placement of an object on a storage node:
// where it lives (node, location, space), its type/status, a reference
// count, and lifecycle timestamps.
type ObjDeploymentData struct {
	// @inject_tag: json:"id"
	Id int32 `protobuf:"varint,1,opt,name=id,proto3" json:"id"`
	// @inject_tag: json:"obj_uuid"
	ObjUuid string `protobuf:"bytes,2,opt,name=obj_uuid,proto3" json:"obj_uuid"`
	// @inject_tag: json:"node_id"
	NodeId int32 `protobuf:"varint,3,opt,name=node_id,proto3" json:"node_id"`
	// @inject_tag: json:"node_name"
	NodeName string `protobuf:"bytes,4,opt,name=node_name,proto3" json:"node_name"`
	// @inject_tag: json:"location"
	Location string `protobuf:"bytes,5,opt,name=location,proto3" json:"location"`
	// @inject_tag: json:"space"
	Space string `protobuf:"bytes,6,opt,name=space,proto3" json:"space"`
	// @inject_tag: json:"type"
	Type int32 `protobuf:"varint,7,opt,name=type,proto3" json:"type"`
	// @inject_tag: json:"status"
	Status int32 `protobuf:"varint,8,opt,name=status,proto3" json:"status"`
	// @inject_tag: json:"ref_count"
	RefCount int32 `protobuf:"varint,9,opt,name=ref_count,proto3" json:"ref_count"`
	// @inject_tag: json:"version"
	Version int32 `protobuf:"varint,10,opt,name=version,proto3" json:"version"`
	// @inject_tag: json:"create_time"
	CreateTime string `protobuf:"bytes,11,opt,name=create_time,proto3" json:"create_time"`
	// @inject_tag: json:"update_time"
	UpdateTime string `protobuf:"bytes,12,opt,name=update_time,proto3" json:"update_time"`
	// @inject_tag: json:"delete_time"
	DeleteTime string `protobuf:"bytes,13,opt,name=delete_time,proto3" json:"delete_time"`
}

// DownloadObjectReq describes downloading a whole object to a local path;
// NodeName optionally pins the source node.
type DownloadObjectReq struct {
	// @inject_tag: json:"user_id"
	UserId string `protobuf:"bytes,1,opt,name=user_id,proto3" json:"user_id"`
	// @inject_tag: json:"obj_uuid"
	ObjUuid string `protobuf:"bytes,2,opt,name=obj_uuid,proto3" json:"obj_uuid"`
	// @inject_tag: json:"target_local_path"
	TargetLocalPath string `protobuf:"bytes,3,opt,name=target_local_path,proto3" json:"target_local_path"`
	// @inject_tag: json:"node_name"
	NodeName *string `protobuf:"bytes,4,opt,name=node_name,proto3,oneof" json:"node_name"`
}

// DownloadObjectTaskParams bundles the original DownloadObjectReq with the
// resolved source node and bucket details for the download task.
type DownloadObjectTaskParams struct {
	// @inject_tag: json:"request"
	Request *DownloadObjectReq `protobuf:"bytes,1,opt,name=request,proto3" json:"request"`
	// @inject_tag: json:"node_id"
	NodeId int32 `protobuf:"varint,2,opt,name=node_id,proto3" json:"node_id"`
	// @inject_tag: json:"node_name"
	NodeName string `protobuf:"bytes,3,opt,name=node_name,proto3" json:"node_name"`
	// @inject_tag: json:"node_type"
	NodeType int32 `protobuf:"varint,4,opt,name=node_type,proto3" json:"node_type"`
	// @inject_tag: json:"bucket_name"
	BucketName string `protobuf:"bytes,5,opt,name=bucket_name,proto3" json:"bucket_name"`
	// @inject_tag: json:"location"
	Location string `protobuf:"bytes,6,opt,name=location,proto3" json:"location"`
	// @inject_tag: json:"request_id"
	RequestId string `protobuf:"bytes,7,opt,name=request_id,proto3" json:"request_id"`
}

// DownloadObjectResp returns the id of the created download task and the
// resolved node/bucket alongside the standard envelope fields.
type DownloadObjectResp struct {
	// @inject_tag: json:"code"
	Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code"`
	// @inject_tag: json:"message"
	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message"`
	// @inject_tag: json:"request_id"
	RequestId string `protobuf:"bytes,3,opt,name=request_id,proto3" json:"request_id"`
	// @inject_tag: json:"task_id"
	TaskId int32 `protobuf:"varint,4,opt,name=task_id,proto3" json:"task_id"`
	// @inject_tag: json:"node_type"
	NodeType int32 `protobuf:"varint,5,opt,name=node_type,proto3" json:"node_type"`
	// @inject_tag: json:"bucket_name"
	BucketName string `protobuf:"bytes,6,opt,name=bucket_name,proto3" json:"bucket_name"`
}

// LoadObjectReq describes loading an object from an (optional) source node
// onto a target node, using CacheLocalPath as intermediate local storage.
type LoadObjectReq struct {
	// @inject_tag: json:"user_id"
	UserId string `protobuf:"bytes,1,opt,name=user_id,proto3" json:"user_id"`
	// @inject_tag: json:"obj_uuid"
	ObjUuid string `protobuf:"bytes,2,opt,name=obj_uuid,proto3" json:"obj_uuid"`
	// @inject_tag: json:"source_node_name"
	SourceNodeName *string `protobuf:"bytes,3,opt,name=source_node_name,proto3,oneof" json:"source_node_name"`
	// @inject_tag: json:"target_node_name"
	TargetNodeName string `protobuf:"bytes,4,opt,name=target_node_name,proto3" json:"target_node_name"`
	// @inject_tag: json:"cache_local_path"
	CacheLocalPath string `protobuf:"bytes,5,opt,name=cache_local_path,proto3" json:"cache_local_path"`
}

// LoadObjectTaskParams bundles the original LoadObjectReq with the fully
// resolved source and target node/bucket/location details for the load task.
type LoadObjectTaskParams struct {
	// @inject_tag: json:"request"
	Request *LoadObjectReq `protobuf:"bytes,1,opt,name=request,proto3" json:"request"`
	// @inject_tag: json:"source_node_id"
	SourceNodeId int32 `protobuf:"varint,2,opt,name=source_node_id,proto3" json:"source_node_id"`
	// @inject_tag: json:"source_node_name"
	SourceNodeName string `protobuf:"bytes,3,opt,name=source_node_name,proto3" json:"source_node_name"`
	// @inject_tag: json:"source_node_type"
	SourceNodeType int32 `protobuf:"varint,4,opt,name=source_node_type,proto3" json:"source_node_type"`
	// @inject_tag: json:"source_bucket_name"
	SourceBucketName string `protobuf:"bytes,5,opt,name=source_bucket_name,proto3" json:"source_bucket_name"`
	// @inject_tag: json:"source_location"
	SourceLocation string `protobuf:"bytes,6,opt,name=source_location,proto3" json:"source_location"`
	// @inject_tag: json:"target_node_id"
	TargetNodeId int32 `protobuf:"varint,7,opt,name=target_node_id,proto3" json:"target_node_id"`
	// @inject_tag: json:"target_node_name"
	TargetNodeName string `protobuf:"bytes,8,opt,name=target_node_name,proto3" json:"target_node_name"`
	// @inject_tag: json:"target_node_type"
	TargetNodeType int32 `protobuf:"varint,9,opt,name=target_node_type,proto3" json:"target_node_type"`
	// @inject_tag: json:"target_bucket_name"
	TargetBucketName string `protobuf:"bytes,10,opt,name=target_bucket_name,proto3" json:"target_bucket_name"`
	// @inject_tag: json:"target_location"
	TargetLocation string `protobuf:"bytes,11,opt,name=target_location,proto3" json:"target_location"`
	// @inject_tag: json:"request_id"
	RequestId string `protobuf:"bytes,12,opt,name=request_id,proto3" json:"request_id"`
}

// LoadObjectResp returns the id of the created load task, the resolved
// source and target placement details, and the target deployment record,
// alongside the standard envelope fields. Note there is no
// target_bucket_name field here, unlike LoadObjectTaskParams.
type LoadObjectResp struct {
	// @inject_tag: json:"code"
	Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code"`
	// @inject_tag: json:"message"
	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message"`
	// @inject_tag: json:"request_id"
	RequestId string `protobuf:"bytes,3,opt,name=request_id,proto3" json:"request_id"`
	// @inject_tag: json:"task_id"
	TaskId int32 `protobuf:"varint,4,opt,name=task_id,proto3" json:"task_id"`
	// @inject_tag: json:"source_node_id"
	SourceNodeId int32 `protobuf:"varint,5,opt,name=source_node_id,proto3" json:"source_node_id"`
	// @inject_tag: json:"source_node_name"
	SourceNodeName string `protobuf:"bytes,6,opt,name=source_node_name,proto3" json:"source_node_name"`
	// @inject_tag: json:"source_node_type"
	SourceNodeType int32 `protobuf:"varint,7,opt,name=source_node_type,proto3" json:"source_node_type"`
	// @inject_tag: json:"source_bucket_name"
	SourceBucketName string `protobuf:"bytes,8,opt,name=source_bucket_name,proto3" json:"source_bucket_name"`
	// @inject_tag: json:"source_location"
	SourceLocation string `protobuf:"bytes,9,opt,name=source_location,proto3" json:"source_location"`
	// @inject_tag: json:"target_node_id"
	TargetNodeId int32 `protobuf:"varint,10,opt,name=target_node_id,proto3" json:"target_node_id"`
	// @inject_tag: json:"target_node_name"
	TargetNodeName string `protobuf:"bytes,11,opt,name=target_node_name,proto3" json:"target_node_name"`
	// @inject_tag: json:"target_node_type"
	TargetNodeType int32 `protobuf:"varint,12,opt,name=target_node_type,proto3" json:"target_node_type"`
	// @inject_tag: json:"target_location"
	TargetLocation string `protobuf:"bytes,13,opt,name=target_location,proto3" json:"target_location"`
	// @inject_tag: json:"target_deployment_info"
	TargetDeploymentInfo *ObjDeploymentData `protobuf:"bytes,14,opt,name=target_deployment_info,proto3" json:"target_deployment_info"`
}

// MigrateObjectReq describes moving an object from an (optional) source node
// to a target node.
type MigrateObjectReq struct {
	// @inject_tag: json:"user_id"
	UserId string `protobuf:"bytes,1,opt,name=user_id,proto3" json:"user_id"`
	// @inject_tag: json:"obj_uuid"
	ObjUuid string `protobuf:"bytes,2,opt,name=obj_uuid,proto3" json:"obj_uuid"`
	// @inject_tag: json:"source_node_name"
	SourceNodeName *string `protobuf:"bytes,3,opt,name=source_node_name,proto3,oneof" json:"source_node_name"`
	// @inject_tag: json:"target_node_name"
	TargetNodeName string `protobuf:"bytes,4,opt,name=target_node_name,proto3" json:"target_node_name"`
}

// MigrateObjectResp returns the id of the created migrate task and the
// target deployment record alongside the standard envelope fields.
type MigrateObjectResp struct {
	// @inject_tag: json:"code"
	Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code"`
	// @inject_tag: json:"message"
	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message"`
	// @inject_tag: json:"request_id"
	RequestId string `protobuf:"bytes,3,opt,name=request_id,proto3" json:"request_id"`
	// @inject_tag: json:"task_id"
	TaskId int32 `protobuf:"varint,4,opt,name=task_id,proto3" json:"task_id"`
	// @inject_tag: json:"target_deployment_info"
	TargetDeploymentInfo *ObjDeploymentData `protobuf:"bytes,5,opt,name=target_deployment_info,proto3" json:"target_deployment_info"`
}

// CopyObjectReq describes copying the listed keys from a source object,
// either into an existing target object (TargetObjUuid set) or into a new
// one (Name/Desc set).
type CopyObjectReq struct {
	// @inject_tag: json:"user_id"
	UserId string `protobuf:"bytes,1,opt,name=user_id,proto3" json:"user_id"`
	// @inject_tag: json:"source_obj_uuid"
	SourceObjUuid string `protobuf:"bytes,2,opt,name=source_obj_uuid,proto3" json:"source_obj_uuid"`
	// @inject_tag: json:"object_keys"
	ObjectKeys []string `protobuf:"bytes,3,rep,name=object_keys,proto3" json:"object_keys"`
	// @inject_tag: json:"target_obj_uuid"
	TargetObjUuid *string `protobuf:"bytes,4,opt,name=target_obj_uuid,proto3,oneof" json:"target_obj_uuid"`
	// @inject_tag: json:"name"
	Name *string `protobuf:"bytes,5,opt,name=name,proto3,oneof" json:"name"`
	// @inject_tag: json:"desc"
	Desc *string `protobuf:"bytes,6,opt,name=desc,proto3,oneof" json:"desc"`
}

// CopyObjectResp is the response to CopyObjectReq; DestObjUuid is the uuid of
// the newly created destination object and TaskId the asynchronous copy task.
type CopyObjectResp struct {
// @inject_tag: json:"code"
Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code"`
// @inject_tag: json:"message"
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message"`
// @inject_tag: json:"request_id"
RequestId string `protobuf:"bytes,3,opt,name=request_id,proto3" json:"request_id"`
// @inject_tag: json:"task_id"
TaskId int32 `protobuf:"varint,4,opt,name=task_id,proto3" json:"task_id"`
// @inject_tag: json:"dest_obj_uuid"
DestObjUuid string `protobuf:"bytes,5,opt,name=dest_obj_uuid,proto3" json:"dest_obj_uuid"`
}

// PutObjectDeploymentReq registers/updates an object's deployment on a given
// storage node; Location is optional (nil when not specified).
type PutObjectDeploymentReq struct {
// @inject_tag: json:"user_id"
UserId string `protobuf:"bytes,1,opt,name=user_id,proto3" json:"user_id"`
// @inject_tag: json:"obj_uuid"
ObjUuid string `protobuf:"bytes,2,opt,name=obj_uuid,proto3" json:"obj_uuid"`
// @inject_tag: json:"node_name"
NodeName string `protobuf:"bytes,3,opt,name=node_name,proto3" json:"node_name"`
// @inject_tag: json:"location"
Location *string `protobuf:"bytes,4,opt,name=location,proto3,oneof" json:"location"`
}

// DeleteObjectReq identifies an object to delete; the url tag indicates the
// uuid is also sent as a query parameter.
// NOTE(review): the sole field uses proto field number 2 — presumably field 1
// (user_id) was dropped from the message; confirm against the .proto.
type DeleteObjectReq struct {
// @inject_tag: json:"obj_uuid"
ObjUuid string `protobuf:"bytes,2,opt,name=obj_uuid,proto3" json:"obj_uuid" url:"obj_uuid"`
}

// DeleteFileReq identifies a single file (by Source key) within an object to
// delete; both values are also encoded as url query parameters.
type DeleteFileReq struct {
// @inject_tag: json:"obj_uuid"
ObjUuid string `protobuf:"bytes,1,opt,name=obj_uuid,proto3" json:"obj_uuid" url:"obj_uuid"`
// @inject_tag: json:"source"
Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source" url:"source"`
}

// DeleteObjectDeploymentReq removes an object's deployment record; the
// optional Force flag (nil = unset) presumably forces removal — confirm
// semantics with the service definition.
type DeleteObjectDeploymentReq struct {
// @inject_tag: json:"obj_uuid"
ObjUuid string `protobuf:"bytes,2,opt,name=obj_uuid,proto3" json:"obj_uuid" url:"obj_uuid"`
// @inject_tag: json:"force"
Force *bool `protobuf:"varint,4,opt,name=force,proto3,oneof" json:"force" url:"force"`
}

// ListObjectsReq is an S3-style listing request (prefix/marker/max-keys/
// delimiter pagination). Optional pointer fields carry ",omitempty" url tags
// so unset values are not sent as query parameters.
type ListObjectsReq struct {
// @inject_tag: json:"user_id"
UserId string `protobuf:"bytes,1,opt,name=user_id,proto3" json:"user_id" url:"user_id"`
// @inject_tag: json:"obj_uuid"
ObjUuid string `protobuf:"bytes,2,opt,name=obj_uuid,proto3" json:"obj_uuid" url:"obj_uuid"`
// @inject_tag: json:"prefix"
Prefix *string `protobuf:"bytes,3,opt,name=prefix,proto3,oneof" json:"prefix" url:"prefix,omitempty"`
// @inject_tag: json:"marker"
Marker *string `protobuf:"bytes,4,opt,name=marker,proto3,oneof" json:"marker" url:"marker,omitempty"`
// @inject_tag: json:"max_keys"
MaxKeys *int32 `protobuf:"varint,5,opt,name=max_keys,proto3,oneof" json:"max_keys" url:"max_keys,omitempty"`
// @inject_tag: json:"delimiter"
Delimiter *string `protobuf:"bytes,6,opt,name=delimiter,proto3,oneof" json:"delimiter" url:"delimiter,omitempty"`
}

// ListObjectsResp is the response to ListObjectsReq; the listing itself is in
// Data. See ToError/IsSuccess for status-code handling.
type ListObjectsResp struct {
// @inject_tag: json:"code"
Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code"`
// @inject_tag: json:"message"
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message"`
// @inject_tag: json:"request_id"
RequestId string `protobuf:"bytes,3,opt,name=request_id,proto3" json:"request_id"`
// @inject_tag: json:"data"
Data ListObjectsRespData `protobuf:"bytes,4,opt,name=data,proto3" json:"data"`
}

// ToError converts the response's status into a Go error. A response whose
// Code equals ObjectStatusInvalidCode maps to the typed ErrObjectStatusInvalid
// so callers can detect that condition; every other code is wrapped into a
// generic formatted error carrying the code and message.
func (r *ListObjectsResp) ToError() error {
	switch r.Code {
	case ObjectStatusInvalidCode:
		return ErrObjectStatusInvalid{}
	default:
		return fmt.Errorf("urchin operate error.[%d]%s", r.Code, r.Message)
	}
}

// IsSuccess reports whether the response carries the urchin success code.
func (r *ListObjectsResp) IsSuccess() bool {
return r.Code == UrchinSuccessCode
}

// ListObjectsRespData holds one page of an object listing: the entries, the
// common prefixes collapsed by the delimiter, and pagination state
// (NextMarker/IsTruncated).
type ListObjectsRespData struct {
// @inject_tag: json:"next_marker"
NextMarker string `protobuf:"bytes,1,opt,name=next_marker,proto3" json:"next_marker"`
// @inject_tag: json:"list"
List []*ObjectContent `protobuf:"bytes,2,rep,name=list,proto3" json:"list"`
// @inject_tag: json:"common_prefixes"
CommonPrefixes []string `protobuf:"bytes,3,rep,name=common_prefixes,proto3" json:"common_prefixes"`
// @inject_tag: json:"is_truncated"
IsTruncated bool `protobuf:"varint,4,opt,name=is_truncated,proto3" json:"is_truncated"`
}

// ObjectContent is one entry of a listing (key, etag, size, mtime).
type ObjectContent struct {
// @inject_tag: json:"key"
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key"`
// @inject_tag: json:"etag"
Etag string `protobuf:"bytes,2,opt,name=etag,proto3" json:"etag"`
// @inject_tag: json:"size"
// Size int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size"`
// NOTE(review): Size was switched from int64 to string (old line kept above),
// but the protobuf tag still says "varint" — stale for a string field. Looks
// like the struct is only used via its json tags; confirm before relying on
// protobuf (un)marshaling of this type.
Size string `protobuf:"varint,3,opt,name=size,proto3" json:"size"`
// @inject_tag: json:"last_modified"
LastModified time.Time `protobuf:"bytes,4,opt,name=last_modified,proto3" json:"last_modified"`
}

// GetObjectMetadataReq requests the metadata of a single key within an object.
type GetObjectMetadataReq struct {
// @inject_tag: json:"user_id"
UserId string `protobuf:"bytes,1,opt,name=user_id,proto3" json:"user_id"`
// @inject_tag: json:"obj_uuid"
ObjUuid string `protobuf:"bytes,2,opt,name=obj_uuid,proto3" json:"obj_uuid"`
// @inject_tag: json:"object_key"
ObjectKey string `protobuf:"bytes,3,opt,name=object_key,proto3" json:"object_key"`
}

// GetObjectMetadataResp is the response to GetObjectMetadataReq; Data is nil
// when the service returns no metadata payload.
type GetObjectMetadataResp struct {
// @inject_tag: json:"code"
Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code"`
// @inject_tag: json:"message"
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message"`
// @inject_tag: json:"request_id"
RequestId string `protobuf:"bytes,3,opt,name=request_id,proto3" json:"request_id"`
// @inject_tag: json:"data"
Data *GetObjectMetadataData `protobuf:"bytes,4,opt,name=data,proto3" json:"data"`
}

// GetObjectMetadataData carries the metadata of one object key. Note
// LastModified is a string here (unlike ObjectContent, which uses time.Time).
type GetObjectMetadataData struct {
// @inject_tag: json:"content_length"
ContentLength int64 `protobuf:"varint,1,opt,name=content_length,proto3" json:"content_length"`
// @inject_tag: json:"etag"
Etag string `protobuf:"bytes,2,opt,name=etag,proto3" json:"etag"`
// @inject_tag: json:"last_modified"
LastModified string `protobuf:"bytes,3,opt,name=last_modified,proto3" json:"last_modified"`
}

// ListPartsReq lists the uploaded parts of a multipart upload, with optional
// MaxParts/PartNumberMarker pagination (omitted from the url when nil).
type ListPartsReq struct {
// @inject_tag: json:"user_id"
UserId string `protobuf:"bytes,1,opt,name=user_id,proto3" json:"user_id" url:"user_id"`
// @inject_tag: json:"obj_uuid"
ObjUuid string `protobuf:"bytes,2,opt,name=obj_uuid,proto3" json:"obj_uuid" url:"obj_uuid"`
// @inject_tag: json:"upload_id"
UploadId string `protobuf:"bytes,3,opt,name=upload_id,proto3" json:"upload_id" url:"upload_id"`
// @inject_tag: json:"max_parts"
MaxParts *int32 `protobuf:"varint,4,opt,name=max_parts,proto3,oneof" json:"max_parts" url:"max_parts,omitempty"`
// @inject_tag: json:"part_number_marker"
PartNumberMarker *int32 `protobuf:"varint,5,opt,name=part_number_marker,proto3,oneof" json:"part_number_marker" url:"part_number_marker,omitempty"`
}

// ListPartsResp is the response to ListPartsReq; Data is nil when no payload
// is returned.
type ListPartsResp struct {
// @inject_tag: json:"code"
Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code"`
// @inject_tag: json:"message"
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message"`
// @inject_tag: json:"request_id"
RequestId string `protobuf:"bytes,3,opt,name=request_id,proto3" json:"request_id"`
// @inject_tag: json:"data"
Data *ListPartsRespData `protobuf:"bytes,4,opt,name=data,proto3" json:"data"`
}

// ListPartsRespData holds one page of multipart-upload parts plus pagination
// state (IsTruncated/NextPartNumberMarker).
type ListPartsRespData struct {
// @inject_tag: json:"is_truncated"
IsTruncated bool `protobuf:"varint,1,opt,name=is_truncated,proto3" json:"is_truncated"`
// @inject_tag: json:"next_part_number_marker"
NextPartNumberMarker int32 `protobuf:"varint,2,opt,name=next_part_number_marker,proto3" json:"next_part_number_marker"`
// @inject_tag: json:"list"
List []*PartContent `protobuf:"bytes,3,rep,name=list,proto3" json:"list"`
}

// PartContent describes one uploaded part of a multipart upload.
type PartContent struct {
// @inject_tag: json:"part_number"
PartNumber int32 `protobuf:"varint,1,opt,name=part_number,proto3" json:"part_number"`
// @inject_tag: json:"etag"
Etag string `protobuf:"bytes,2,opt,name=etag,proto3" json:"etag"`
// @inject_tag: json:"size"
Size int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size"`
// @inject_tag: json:"last_modified"
LastModified string `protobuf:"bytes,4,opt,name=last_modified,proto3" json:"last_modified"`
}

// GetTaskReq queries tasks for a user, paged and optionally sorted; TaskId
// (when non-nil) narrows the query to a single task.
type GetTaskReq struct {
// @inject_tag: json:"user_id"
UserId string `protobuf:"bytes,1,opt,name=user_id,proto3" json:"user_id" url:"user_id"`
// @inject_tag: json:"page_index"
PageIndex int32 `protobuf:"varint,2,opt,name=page_index,proto3" json:"page_index" url:"page_index"`
// @inject_tag: json:"page_size"
PageSize int32 `protobuf:"varint,3,opt,name=page_size,proto3" json:"page_size" url:"page_size"`
// @inject_tag: json:"sort_by"
SortBy *string `protobuf:"bytes,4,opt,name=sort_by,proto3,oneof" json:"sort_by" url:"sort_by,omitempty"`
// @inject_tag: json:"order_by"
OrderBy *string `protobuf:"bytes,5,opt,name=order_by,proto3,oneof" json:"order_by" url:"order_by,omitempty"`
// @inject_tag: json:"task_id"
TaskId *int32 `protobuf:"varint,6,opt,name=task_id,proto3,oneof" json:"task_id" url:"task_id,omitempty"`
}

// GetTaskResp is the response to GetTaskReq; Data is nil when no payload is
// returned.
type GetTaskResp struct {
// @inject_tag: json:"code"
Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code"`
// @inject_tag: json:"message"
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message"`
// @inject_tag: json:"request_id"
RequestId string `protobuf:"bytes,3,opt,name=request_id,proto3" json:"request_id"`
// @inject_tag: json:"data"
Data *GetTaskRespData `protobuf:"bytes,4,opt,name=data,proto3" json:"data"`
}

// GetTaskRespData is one page of task query results: total count plus the
// entries of the current page.
type GetTaskRespData struct {
// @inject_tag: json:"total"
Total int32 `protobuf:"varint,1,opt,name=total,proto3" json:"total"`
// @inject_tag: json:"list"
List []*TaskDetail `protobuf:"bytes,2,rep,name=list,proto3" json:"list"`
}

// TaskDetail pairs a task with its execution records.
type TaskDetail struct {
// @inject_tag: json:"task"
Task *TaskData `protobuf:"bytes,1,opt,name=task,proto3" json:"task"`
// @inject_tag: json:"execs"
Execs []*TaskExecData `protobuf:"bytes,2,rep,name=execs,proto3" json:"execs"`
}

// TaskData is the task record itself: identity (Id/Type/Name), the serialized
// Params, status/result bookkeeping, the owning object and user, and its
// lifecycle timestamps (all timestamps are strings as returned by the
// service).
type TaskData struct {
// @inject_tag: json:"id"
Id int32 `protobuf:"varint,1,opt,name=id,proto3" json:"id"`
// @inject_tag: json:"type"
Type int32 `protobuf:"varint,2,opt,name=type,proto3" json:"type"`
// @inject_tag: json:"name"
Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name"`
// @inject_tag: json:"params"
Params string `protobuf:"bytes,4,opt,name=params,proto3" json:"params"`
// @inject_tag: json:"status"
Status int32 `protobuf:"varint,5,opt,name=status,proto3" json:"status"`
// @inject_tag: json:"task_exec_id"
// Id of the current/latest execution (see TaskExecData).
TaskExecId int32 `protobuf:"varint,6,opt,name=task_exec_id,proto3" json:"task_exec_id"`
// @inject_tag: json:"result"
Result int32 `protobuf:"varint,7,opt,name=result,proto3" json:"result"`
// @inject_tag: json:"return"
Return string `protobuf:"bytes,8,opt,name=return,proto3" json:"return"`
// @inject_tag: json:"obj_uuid"
ObjUuid string `protobuf:"bytes,9,opt,name=obj_uuid,proto3" json:"obj_uuid"`
// @inject_tag: json:"user_id"
UserId string `protobuf:"bytes,10,opt,name=user_id,proto3" json:"user_id"`
// @inject_tag: json:"start_time"
StartTime string `protobuf:"bytes,11,opt,name=start_time,proto3" json:"start_time"`
// @inject_tag: json:"finish_time"
FinishTime string `protobuf:"bytes,12,opt,name=finish_time,proto3" json:"finish_time"`
// @inject_tag: json:"create_time"
CreateTime string `protobuf:"bytes,13,opt,name=create_time,proto3" json:"create_time"`
// @inject_tag: json:"update_time"
UpdateTime string `protobuf:"bytes,14,opt,name=update_time,proto3" json:"update_time"`
}

// TaskExecData is one execution attempt of a task (see TaskDetail.Execs):
// serialized Context, status/result/return payload, and string timestamps.
type TaskExecData struct {
// @inject_tag: json:"id"
Id int32 `protobuf:"varint,1,opt,name=id,proto3" json:"id"`
// @inject_tag: json:"task_id"
TaskId int32 `protobuf:"varint,2,opt,name=task_id,proto3" json:"task_id"`
// @inject_tag: json:"context"
Context string `protobuf:"bytes,3,opt,name=context,proto3" json:"context"`
// @inject_tag: json:"status"
Status int32 `protobuf:"varint,4,opt,name=status,proto3" json:"status"`
// @inject_tag: json:"result"
Result int32 `protobuf:"varint,5,opt,name=result,proto3" json:"result"`
// @inject_tag: json:"return"
Return string `protobuf:"bytes,6,opt,name=return,proto3" json:"return"`
// @inject_tag: json:"start_time"
StartTime string `protobuf:"bytes,7,opt,name=start_time,proto3" json:"start_time"`
// @inject_tag: json:"finish_time"
FinishTime string `protobuf:"bytes,8,opt,name=finish_time,proto3" json:"finish_time"`
// @inject_tag: json:"create_time"
CreateTime string `protobuf:"bytes,9,opt,name=create_time,proto3" json:"create_time"`
// @inject_tag: json:"update_time"
UpdateTime string `protobuf:"bytes,10,opt,name=update_time,proto3" json:"update_time"`
}

// FinishTaskReq reports completion of a task with its Result code and an
// optional Return payload (nil when there is nothing to return).
type FinishTaskReq struct {
// @inject_tag: json:"user_id"
UserId string `protobuf:"bytes,1,opt,name=user_id,proto3" json:"user_id"`
// @inject_tag: json:"task_id"
TaskId int32 `protobuf:"varint,2,opt,name=task_id,proto3" json:"task_id"`
// @inject_tag: json:"result"
Result int32 `protobuf:"varint,3,opt,name=result,proto3" json:"result"`
// @inject_tag: json:"return"
Return *string `protobuf:"bytes,4,opt,name=return,proto3,oneof" json:"return"`
}

// RetryTaskReq asks the service to retry the given task for the given user.
type RetryTaskReq struct {
// @inject_tag: json:"user_id"
UserId string `protobuf:"bytes,1,opt,name=user_id,proto3" json:"user_id"`
// @inject_tag: json:"task_id"
TaskId int32 `protobuf:"varint,2,opt,name=task_id,proto3" json:"task_id"`
}

// ReportTaskHeartbeatReq reports liveness of a running task.
type ReportTaskHeartbeatReq struct {
// @inject_tag: json:"user_id"
UserId string `protobuf:"bytes,1,opt,name=user_id,proto3" json:"user_id"`
// @inject_tag: json:"task_id"
TaskId int32 `protobuf:"varint,2,opt,name=task_id,proto3" json:"task_id"`
}

// XIpfsUpload maps the <IpfsUpload> XML element carrying an IPFS CId and a
// numeric Result code.
type XIpfsUpload struct {
XMLName xml.Name `xml:"IpfsUpload"`
CId string `xml:"CId"`
Result int `xml:"Result"`
}

// XIpfsDownload maps the <IpfsDownload> XML element carrying a numeric Result
// code.
type XIpfsDownload struct {
XMLName xml.Name `xml:"IpfsDownload"`
Result int `xml:"Result"`
}

// StorageNodeConfig is the full configuration of one storage node: endpoint
// and credentials (AccessKey/SecretKey or User/Pass), bucket/path layout,
// auth settings, timeouts, plus nested concurrency/rate/part tuning blocks.
// NOTE(review): SecretKey/Pass are credentials — avoid logging this struct
// with %+v.
type StorageNodeConfig struct {
// @inject_tag: json:"endpoint"
Endpoint string `protobuf:"bytes,1,opt,name=endpoint,proto3" json:"endpoint"`
// @inject_tag: json:"access_key"
AccessKey string `protobuf:"bytes,2,opt,name=access_key,proto3" json:"access_key"`
// @inject_tag: json:"secret_key"
SecretKey string `protobuf:"bytes,3,opt,name=secret_key,proto3" json:"secret_key"`
// @inject_tag: json:"user"
User string `protobuf:"bytes,4,opt,name=user,proto3" json:"user"`
// @inject_tag: json:"pass"
Pass string `protobuf:"bytes,5,opt,name=pass,proto3" json:"pass"`
// @inject_tag: json:"bucket_name"
BucketName string `protobuf:"bytes,6,opt,name=bucket_name,proto3" json:"bucket_name"`
// @inject_tag: json:"auth_service"
AuthService string `protobuf:"bytes,7,opt,name=auth_service,proto3" json:"auth_service"`
// @inject_tag: json:"auth_region"
AuthRegion string `protobuf:"bytes,8,opt,name=auth_region,proto3" json:"auth_region"`
// @inject_tag: json:"user_id"
UserId int32 `protobuf:"varint,9,opt,name=user_id,proto3" json:"user_id"`
// @inject_tag: json:"bucket_id"
BucketId int32 `protobuf:"varint,10,opt,name=bucket_id,proto3" json:"bucket_id"`
// @inject_tag: json:"base_path"
BasePath string `protobuf:"bytes,11,opt,name=base_path,proto3" json:"base_path"`
// @inject_tag: json:"url"
Url string `protobuf:"bytes,12,opt,name=url,proto3" json:"url"`
// @inject_tag: json:"cluster_id"
ClusterId string `protobuf:"bytes,13,opt,name=cluster_id,proto3" json:"cluster_id"`
// @inject_tag: json:"org_id"
OrgId string `protobuf:"bytes,14,opt,name=org_id,proto3" json:"org_id"`
// @inject_tag: json:"pass_magic"
PassMagic string `protobuf:"bytes,15,opt,name=pass_magic,proto3" json:"pass_magic"`
// @inject_tag: json:"req_timeout"
ReqTimeout int32 `protobuf:"varint,16,opt,name=req_timeout,proto3" json:"req_timeout"`
// @inject_tag: json:"max_connection"
MaxConnection int32 `protobuf:"varint,17,opt,name=max_connection,proto3" json:"max_connection"`
// @inject_tag: json:"concurrency"
Concurrency *StorageNodeConcurrencyConfig `protobuf:"bytes,18,opt,name=concurrency,proto3" json:"concurrency"`
// @inject_tag: json:"rate"
Rate *StorageNodeRateConfig `protobuf:"bytes,19,opt,name=rate,proto3" json:"rate"`
// @inject_tag: json:"part"
Part *StorageNodePartConfig `protobuf:"bytes,20,opt,name=part,proto3" json:"part"`
}

// StorageNodeConcurrencyConfig caps the number of concurrent upload/download
// tasks per node, split into single-file and multi(part) task counts.
type StorageNodeConcurrencyConfig struct {
UploadFileTaskNum int32 `protobuf:"varint,1,opt,name=upload_file_task_num,proto3" json:"upload_file_task_num,omitempty"`
UploadMultiTaskNum int32 `protobuf:"varint,2,opt,name=upload_multi_task_num,proto3" json:"upload_multi_task_num,omitempty"`
DownloadFileTaskNum int32 `protobuf:"varint,3,opt,name=download_file_task_num,proto3" json:"download_file_task_num,omitempty"`
DownloadMultiTaskNum int32 `protobuf:"varint,4,opt,name=download_multi_task_num,proto3" json:"download_multi_task_num,omitempty"`
}

// StorageNodeRateConfig is a rate-limiter setting (Limit/Burst pair, as in a
// token-bucket limiter — units not visible here; confirm against the
// consumer).
type StorageNodeRateConfig struct {
Limit float32 `protobuf:"fixed32,1,opt,name=limit,proto3" json:"limit,omitempty"`
Burst int32 `protobuf:"varint,2,opt,name=burst,proto3" json:"burst,omitempty"`
}

// StorageNodePartConfig tunes multipart transfers: PartLimit is presumably
// the size threshold above which multipart is used and PartSize the per-part
// size — confirm against the consumer.
type StorageNodePartConfig struct {
PartLimit int64 `protobuf:"varint,1,opt,name=part_limit,proto3" json:"part_limit,omitempty"`
PartSize int64 `protobuf:"varint,2,opt,name=part_size,proto3" json:"part_size,omitempty"`
}

// ReportSizeChangedReq notifies the service that an object's stored size has
// changed; the uuid is also sent as a url query parameter.
type ReportSizeChangedReq struct {
// @inject_tag: json:"obj_uuid"
ObjUuid string `protobuf:"bytes,1,opt,name=obj_uuid,proto3" json:"obj_uuid" url:"obj_uuid"`
}

+ 2
- 1
options/locale/locale_en-US.ini View File

@@ -976,6 +976,7 @@ description = Description
description_format_err=Description's length can be up to %s characters long.
dataset_name_exist = Dataset English name already exists
dataset_alias_exist = Dataset Chinese name already exists
resource_in_use = The resource is in use by a task, so this operation is not possible. Please try again later.
over_preview_size = The file size exceeds the preview limit
unsupported_preview_file_type = The file type does not support preview
create_dataset = Create Dataset
@@ -3790,4 +3791,4 @@ change_team_access_not_allowed = Changing team access for subject has been restr
org_not_allowed_to_upload_dataset_file = Datasets owned by organizations currently do not allow file uploads. If this feature is required, please wait for future updates.

[aimodel]
org_not_allowed_to_upload_aimodel_file = Aimodels owned by organizations currently do not allow file uploads. If this feature is required, please wait for future updates.
org_not_allowed_to_upload_aimodel_file = Aimodels owned by organizations currently do not allow file uploads. If this feature is required, please wait for future updates.

+ 1
- 0
options/locale/locale_zh-CN.ini View File

@@ -985,6 +985,7 @@ description=描述
description_format_err=描述最多允许输入%s个字符。
dataset_name_exist = 数据集英文名称已存在。
dataset_alias_exist = 数据集中文名称已存在。
resource_in_use = 资源在被云脑任务使用中,无法执行此操作,请稍后重试
over_preview_size = 该文件大小超过了预览限制。
unsupported_preview_file_type = 该文件类型不支持预览。
create_dataset=创建数据集


+ 12
- 11
routers/api/v1/aimodel/aimodel.go View File

@@ -2,6 +2,15 @@ package aimodel

import (
"archive/zip"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"path"
"strconv"
"strings"

"code.gitea.io/gitea/entity"
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/context"
@@ -14,14 +23,6 @@ import (
"code.gitea.io/gitea/services/ai_task_service/storage_helper"
"code.gitea.io/gitea/services/dynconfig/fetcher"
"code.gitea.io/gitea/services/subject_service"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"path"
"strconv"
"strings"
)

func CreateAimodel(ctx *context.Context, req entity.CreateAimodelReq) {
@@ -183,7 +184,7 @@ func DelAimodel(ctx *context.Context) {
aimodel := ctx.AccessContext.Aimodel
err := subject_service.DeleteAimodel(ctx.User, aimodel)
if err != nil {
ctx.JSON(http.StatusOK, response.ResponseError(err))
ctx.JSON(http.StatusOK, response.OuterTrBizError(response.NewBizError(err), ctx.Locale))
return
}
ctx.JSON(http.StatusOK, response.OuterSuccess())
@@ -245,7 +246,7 @@ func PutAimodelReadme(ctx *context.Context, req entity.AimodelReadmeReq) {
err := subject_service.PutAimodelReadme(req)
if err != nil {
log.Error("PutAimodelReadme failed err=%v", err)
ctx.JSON(http.StatusOK, response.ResponseError(err))
ctx.JSON(http.StatusOK, response.OuterTrBizError(response.NewBizError(err), ctx.Locale))
return
}
ctx.JSON(http.StatusOK, response.OuterSuccess())
@@ -477,7 +478,7 @@ func DeleteAimodelFile(ctx *context.Context) {
err := subject_service.DeleteAimodelFile(aimodel, parentDir, fileName)
if err != nil {
log.Error("DeleteAimodelFile failed, aimodel.ID=%s parentDir=%s fileName=%s err=%v", aimodel.ID, parentDir, fileName, err)
ctx.JSON(http.StatusOK, response.ResponseError(err))
ctx.JSON(http.StatusOK, response.OuterTrBizError(response.NewBizError(err), ctx.Locale))
return
}
ctx.JSON(http.StatusOK, response.OuterSuccess())


+ 6
- 6
routers/api/v1/dataset/dataset.go View File

@@ -148,7 +148,7 @@ func PutDatasetReadme(ctx *context.Context, req entity.DatasetReadmeReq) {
err := subject_service.PutDatasetReadme(req)
if err != nil {
log.Error("PutDatasetReadme failed err=%v", err)
ctx.JSON(http.StatusOK, response.ResponseError(err))
ctx.JSON(http.StatusOK, response.OuterTrBizError(response.NewBizError(err), ctx.Locale))
return
}
ctx.JSON(http.StatusOK, response.OuterSuccess())
@@ -710,7 +710,7 @@ func DeleteDatasetFile(ctx *context.Context) {
err := subject_service.DeleteDatasetFile(dataset, parentDir, fileName)
if err != nil {
log.Error("DeleteFile failed, dataset.ID=%s parentDir=%s fileName=%s err=%v", dataset.ID, parentDir, fileName, err)
ctx.JSON(http.StatusOK, response.ResponseError(err))
ctx.JSON(http.StatusOK, response.OuterTrBizError(response.NewBizError(err), ctx.Locale))
return
}
ctx.JSON(http.StatusOK, response.OuterSuccess())
@@ -1049,10 +1049,10 @@ func GetDownloadDatasetMeta(ctx *context.Context) {
}

func AddFileToZip(helper storage_helper.StorageHelper, file storage.FileInfo, zipWriter *zip.Writer, buf []byte) error {
reader, err := helper.OpenFile(file.RelativePath)
reader, err := helper.OpenFile(file.FullPath)
if err != nil {
log.Error("OpenFile err.filePath=%+v,err=%v", file.RelativePath, err)
return fmt.Errorf("failed to open file %s: %w", file.RelativePath, err)
log.Error("OpenFile err.filePath=%+v,err=%v", file.FullPath, err)
return fmt.Errorf("failed to open file %s: %w", file.FullPath, err)
}
defer reader.Close()

@@ -1136,7 +1136,7 @@ func DelDataset(ctx *context.Context) {
dataset := ctx.AccessContext.Dataset
err := subject_service.DeleteDataset(ctx.User, dataset)
if err != nil {
ctx.JSON(http.StatusOK, response.ResponseError(err))
ctx.JSON(http.StatusOK, response.OuterTrBizError(response.NewBizError(err), ctx.Locale))
return
}
ctx.JSON(http.StatusOK, response.OuterSuccess())


+ 3
- 2
routers/response/response_list.go View File

@@ -53,8 +53,8 @@ var PORT_RANGE = &BizError{Code: 2031, DefaultMsg: "The port range is 8000 to 88
var ENDPOINT_NOT_START_SLASH = &BizError{Code: 2032, DefaultMsg: "Custom paths need to comply with URL specifications", TrCode: "ai_task.endpoint_not_start_slash"}
var ENDPOINT_MUST_BE_VALID = &BizError{Code: 2033, DefaultMsg: "Custom paths can only include strings, numbers, and /", TrCode: "ai_task.endpoint_must_be_valid"}
var CAN_NOT_STOP_SAVING_IMAGE_JOB = &BizError{Code: 2034, DefaultMsg: "Cannot stop the AI task while it is saving the image.", TrCode: "ai_task.can_not_stop_saving_image_job"}
var CAN_NOT_FINETUNE_EXPERIENCE=&BizError{Code: 2035, DefaultMsg: "The task can not be exepirenced online.", TrCode: "ai_task.can_not_finetune_experience"}
var CAN_NOT_Eval=&BizError{Code: 2036, DefaultMsg: "The task can not be evaluated online.", TrCode: "ai_task.can_not_eval"}
var CAN_NOT_FINETUNE_EXPERIENCE = &BizError{Code: 2035, DefaultMsg: "The task can not be exepirenced online.", TrCode: "ai_task.can_not_finetune_experience"}
var CAN_NOT_Eval = &BizError{Code: 2036, DefaultMsg: "The task can not be evaluated online.", TrCode: "ai_task.can_not_eval"}

// 登录相关错误
var QR_CODE_EXPIRED = &BizError{Code: 3001, DefaultMsg: "It has expired, please scan the QR code again", TrCode: "form.qr_code_expire"}
@@ -71,6 +71,7 @@ var DATASET_EXIST = &BizError{Code: 4007, DefaultMsg: "dataset exists", TrCode:
var DATASET_PATH_EMPTY = &BizError{Code: 4008, DefaultMsg: "dataset path empty", TrCode: ""}
var DATASET_ALIAS_INVALID = &BizError{Code: 4009, DefaultMsg: "Dataset alias is invalid", TrCode: "dataset.alias_format_err"}
var DATASET_ALIAS_EXIST = &BizError{Code: 4010, DefaultMsg: "Dataset alias already exists", TrCode: "dataset.dataset_alias_exist"}
var RESOURCE_IN_USE = &BizError{Code: 4011, DefaultMsg: "Resource is in use, operation not possible, please try again later.", TrCode: "dataset.resource_in_use"}

// 权限相关错误
var ORG_NOT_ALLOWED_TO_BE_COLLABORATOR = &BizError{Code: 5001, DefaultMsg: "Organizations cannot be added as a collaborator", TrCode: "access.org_not_allowed_to_be_collaborator"}


+ 1
- 0
services/ai_task_service/cluster/c2net.go View File

@@ -700,6 +700,7 @@ func convertContainer2Grampus(d entity.ContainerData) models.GrampusDataset {
IsOverwrite: d.IsOverwrite,
IsNeedUnzip: d.IsNeedUnzip,
IsNeedTensorboard: d.IsNeedTensorboard,
Id: d.Id,
}
}



+ 1
- 0
services/ai_task_service/container_builder/dataset_builder.go View File

@@ -80,6 +80,7 @@ func (b *DatasetBuilder) buildDatasetData(dataset *models.DatasetRegistry, jobNa
Name: dataset.Name,
Bucket: uploader.GetBucket(),
EndPoint: uploader.GetEndpoint(),
Id: uploader.GetDataId(dataset.Path),
ObjectKey: datasetpath,
ReadOnly: true,
ContainerPath: path.Join(b.Opts.ContainerPath, dataset.Name),


+ 63
- 62
services/ai_task_service/container_builder/pre_model_builder.go View File

@@ -27,6 +27,10 @@ func (b *PretrainModelBuilder) SetOpts(opts *entity.ContainerBuildOpts) {
b.Opts = opts
}

func (b *PretrainModelBuilder) GetContainerType() entity.ContainerDataType {
return entity.ContainerPreTrainModel
}

func (b *PretrainModelBuilder) Build(ctx *context.CreationContext) ([]entity.ContainerData, *response.BizError) {
log.Info("Start to build pretrain model.displayJobName=%s jobType=%s cluster=%s", ctx.Request.DisplayJobName, ctx.Request.JobType, ctx.Request.Cluster)
form := ctx.Request
@@ -85,10 +89,6 @@ func (b *PretrainModelBuilder) Build(ctx *context.CreationContext) ([]entity.Con
return preTrainModelEntity, nil
}

func (b *PretrainModelBuilder) GetContainerType() entity.ContainerDataType {
return entity.ContainerPreTrainModel
}

const MODEL_MKDIR_README = "The model files have already been loaded into the container and are ready for use.\n"

func (b *PretrainModelBuilder) buildAimodelData(aimodel *models.AiModelManage, jobName string) (entity.ContainerData, *response.BizError) {
@@ -103,6 +103,7 @@ func (b *PretrainModelBuilder) buildAimodelData(aimodel *models.AiModelManage, j
Name: aimodel.Name,
Bucket: uploader.GetBucket(),
EndPoint: uploader.GetEndpoint(),
Id: uploader.GetDataId(aimodel.Path),
ObjectKey: aimodelpath,
ReadOnly: true,
ContainerPath: path.Join(b.Opts.ContainerPath, aimodel.Name),
@@ -117,61 +118,61 @@ func (b *PretrainModelBuilder) buildAimodelData(aimodel *models.AiModelManage, j
return modelData, nil
}

func (b *PretrainModelBuilder) buildModelData(m *models.AiModelManage, jobName string) (entity.ContainerData, *response.BizError) {
oldStorageType := entity.GetStorageTypeFromCloudbrainType(m.Type)
if oldStorageType == "" {
log.Error("model storage type error.modelId=%d", m.ID)
return entity.ContainerData{}, response.SYSTEM_ERROR
}
oldStorageHelper := storage_helper.SelectStorageHelperFromStorageType(oldStorageType)
preTrainModelPath := getPreTrainModelPath(m.Path)
storageType := oldStorageType
if !b.Opts.IsStorageTypeIn(oldStorageType) {
//意味着模型之前存储的位置不符合要求,需要转存到指定存储
newStorageType := b.Opts.AcceptStorageType[0]
newStorageHelper := storage_helper.SelectStorageHelperFromStorageType(newStorageType)
files, err := oldStorageHelper.GetAllObjectsUnderDir(preTrainModelPath)
newObjectPrefix := path.Join(newStorageHelper.GetJobDefaultObjectKeyPrefix(jobName), b.Opts.GetLocalPath(), m.Name)
for _, file := range files {
newFilePath := path.Join(newObjectPrefix, file.FileName)
err = storage_helper.CopyFileBetweenStorage(oldStorageHelper, newStorageHelper, file.RelativePath, newFilePath)
if err != nil {
log.Error("transfer file between storage error.model=%+v file=%+v err=%v", m, file, err)
return entity.ContainerData{}, response.SYSTEM_ERROR
}
}
preTrainModelPath = newObjectPrefix
storageType = newStorageType
}
uploader := storage_helper.SelectStorageHelperFromStorageType(storageType)
uploader.MKDIR(preTrainModelPath, MODEL_MKDIR_README)
modelData := entity.ContainerData{
Name: m.Name,
Bucket: uploader.GetBucket(),
EndPoint: uploader.GetEndpoint(),
ObjectKey: preTrainModelPath,
ReadOnly: true,
ContainerPath: path.Join(b.Opts.ContainerPath, m.Name),
RealPath: uploader.GetRealPath(preTrainModelPath),
S3DownloadUrl: uploader.GetS3DownloadUrl(preTrainModelPath),
IsDir: true,
Size: m.Size,
IsOverwrite: false,
IsNeedUnzip: false,
}
log.Info("buildModelData modelData=%+v", modelData)
return modelData, nil
}
func getPreTrainModelPath(pretrainModelDir string) string {
index := strings.Index(pretrainModelDir, "/")
if index > 0 {
filterBucket := pretrainModelDir[index+1:]
return filterBucket
} else {
return ""
}
}
//func (b *PretrainModelBuilder) buildModelData(m *models.AiModelManage, jobName string) (entity.ContainerData, *response.BizError) {
// oldStorageType := entity.GetStorageTypeFromCloudbrainType(m.Type)
// if oldStorageType == "" {
// log.Error("model storage type error.modelId=%d", m.ID)
// return entity.ContainerData{}, response.SYSTEM_ERROR
// }
// oldStorageHelper := storage_helper.SelectStorageHelperFromStorageType(oldStorageType)
//
// preTrainModelPath := getPreTrainModelPath(m.Path)
// storageType := oldStorageType
// if !b.Opts.IsStorageTypeIn(oldStorageType) {
// //意味着模型之前存储的位置不符合要求,需要转存到指定存储
// newStorageType := b.Opts.AcceptStorageType[0]
// newStorageHelper := storage_helper.SelectStorageHelperFromStorageType(newStorageType)
// files, err := oldStorageHelper.GetAllObjectsUnderDir(preTrainModelPath)
// newObjectPrefix := path.Join(newStorageHelper.GetJobDefaultObjectKeyPrefix(jobName), b.Opts.GetLocalPath(), m.Name)
// for _, file := range files {
// newFilePath := path.Join(newObjectPrefix, file.FileName)
// err = storage_helper.CopyFileBetweenStorage(oldStorageHelper, newStorageHelper, file.RelativePath, newFilePath)
// if err != nil {
// log.Error("transfer file between storage error.model=%+v file=%+v err=%v", m, file, err)
// return entity.ContainerData{}, response.SYSTEM_ERROR
// }
// }
// preTrainModelPath = newObjectPrefix
// storageType = newStorageType
// }
//
// uploader := storage_helper.SelectStorageHelperFromStorageType(storageType)
// uploader.MKDIR(preTrainModelPath, MODEL_MKDIR_README)
// modelData := entity.ContainerData{
// Name: m.Name,
// Bucket: uploader.GetBucket(),
// EndPoint: uploader.GetEndpoint(),
// ObjectKey: preTrainModelPath,
// ReadOnly: true,
// ContainerPath: path.Join(b.Opts.ContainerPath, m.Name),
// RealPath: uploader.GetRealPath(preTrainModelPath),
// S3DownloadUrl: uploader.GetS3DownloadUrl(preTrainModelPath),
// IsDir: true,
// Size: m.Size,
// IsOverwrite: false,
// IsNeedUnzip: false,
// }
// log.Info("buildModelData modelData=%+v", modelData)
// return modelData, nil
//}
//
//func getPreTrainModelPath(pretrainModelDir string) string {
// index := strings.Index(pretrainModelDir, "/")
// if index > 0 {
// filterBucket := pretrainModelDir[index+1:]
// return filterBucket
// } else {
// return ""
// }
//
//}

+ 35
- 1
services/ai_task_service/storage_helper/client.go View File

@@ -42,12 +42,40 @@ type PartInfo struct {
LastModified time.Time
}

type UploadDirReq struct {
LocalPath string
TargetObjectPrefix string
ReqId string
}

type MKDIRReq struct {
ReqId string
ObjectKey string
Description string
}

// dirPath string, maxKeyArray ...int
type ListObjectsReq struct {
Prefix string
MaxKey int
Marker string
ReqId string
Delimiter string
}

type GetObjectReq struct {
ReqId string
path string
}

type StorageHelper interface {
UploadDir(codePath, jobName string) error
AllocateDatasetNamespace(name, prefix string) (string, error)
UploadDir(codePath, objectKeyPrefix string) error
UploadFile(objectKey string, r io.Reader) error
GetRealPath(objectKey string) string
GetBucket() string
GetEndpoint() string
GetDataId(path string) string
GetJobDefaultObjectKeyPrefix(jobName string) string
MKDIR(path string, description ...string) error
GetOneLevelObjectsUnderDir(dirPath string, maxKeyArray ...int) ([]storage.FileInfo, error)
@@ -63,6 +91,7 @@ type StorageHelper interface {
CopyFile(sourcePath, targetPath string) error
DeleteDir(dirPath string) (remainingFiles bool, err error)
DeleteFile(filePath string) error
DeleteCollection(path string) (remainingFiles bool, err error)
GetAllObjectByBucketAndPrefix(bucket string, prefix string) ([]storage.FileInfo, error)
GetFilesSize(Files []string) int64
RemoveObject(path string) error
@@ -75,6 +104,8 @@ type StorageHelper interface {
CompleteMultiPartUpload(objectName string, uploadID string, totalChunks int) (string, error)
GenSignedUrl(objectKey string) (string, error)
TrimBucketPrefix(path string) string
CountDirSize(objectKey string) (int64, error)
ReportSizeChanged(objectKey string) error
}

func SelectStorageHelperFromStorageType(storageType entity.StorageType, bucket ...string) StorageHelper {
@@ -91,7 +122,10 @@ func SelectStorageHelperFromStorageType(storageType entity.StorageType, bucket .
return &MinioHelper{
Bucket: bucketName,
}
case entity.URCHIN_V2:
return &UrchinV2Helper{}
}

return nil
}



+ 49
- 4
services/ai_task_service/storage_helper/minio.go View File

@@ -22,6 +22,10 @@ type MinioHelper struct {
Bucket string
}

// AllocateDatasetNamespace returns the caller-computed prefix unchanged:
// Minio needs no server-side namespace allocation (unlike Urchin V2, which
// creates a collection and returns its ID).
func (m *MinioHelper) AllocateDatasetNamespace(name, prefix string) (string, error) {
	return prefix, nil
}

func (m *MinioHelper) UploadDir(codePath, objectKeyPrefix string) error {
objectKeyPrefix = m.TrimBucketPrefix(objectKeyPrefix)
return UploadDirToMinio(codePath, objectKeyPrefix, "")
@@ -58,6 +62,10 @@ func (m *MinioHelper) GetEndpoint() string {
return setting.Attachment.Minio.Endpoint
}

// GetDataId returns "": Minio storage has no collection/data-ID concept.
func (m *MinioHelper) GetDataId(path string) string {
	return ""
}

const README = "README"

func (m *MinioHelper) MKDIR(path string, description ...string) error {
@@ -150,16 +158,16 @@ func (m *MinioHelper) GetOneLevelObjectsUnderDirWithMarker(dirPath string, marke
FileName: fileName,
Size: val.Size,
IsDir: false,
RelativePath: dirPath + fileName,
RelativePath: dirPath + "/" + fileName,
FullPath: val.Key,
}
fileInfos = append(fileInfos, fileInfo)
}
for _, val := range r.CommonPrefixes {
fileName := strings.TrimSuffix(strings.TrimPrefix(val.Prefix, dirPath), "/")
fileInfo := storage.FileInfo{
FileName: fileName,
IsDir: true,
RelativePath: dirPath + "/" + fileName,
FileName: fileName,
IsDir: true,
}
fileInfos = append(fileInfos, fileInfo)
}
@@ -209,6 +217,7 @@ func (m *MinioHelper) GetAllObjectsUnderDir(prefix string, maxKeyArray ...int) (
IsDir: isDir,
ParenDir: "",
RelativePath: val.Key,
FullPath: val.Key,
}
fileInfoList = append(fileInfoList, fileInfo)
}
@@ -264,6 +273,7 @@ func (m *MinioHelper) GetAllObjectsUnderDirWithMarker(prefix string, marker stri
IsDir: isDir,
ParenDir: "",
RelativePath: val.Key,
FullPath: val.Key,
}
fileInfoList = append(fileInfoList, fileInfo)
}
@@ -300,6 +310,33 @@ func (m *MinioHelper) GetS3DownloadUrl(key string) string {
return ""
}

// CountDirSize sums the sizes of all objects under objectKey, paging through
// the listing with markers (1000 entries per page).
func (m *MinioHelper) CountDirSize(objectKey string) (int64, error) {

	var fileTotalSize int64
	index := 1
	marker := ""
	for {
		output, err := m.GetAllObjectsUnderDirWithMarker(objectKey, marker, 1000)
		if err != nil {
			log.Error("CountDirSize: GetAllObjectsUnderDirWithMarker error.dirPath=%s err=%v", objectKey, err)
			return 0, err
		}
		for i := 0; i < len(output.Objects); i++ {
			fileTotalSize += output.Objects[i].Size
		}

		// Fix: log message typo ("CountD:irSize").
		log.Info("CountDirSize GetAllObjectsUnderDirWithMarker Page:%d\n", index)
		index++

		if output.IsTruncated {
			marker = output.NextMarker
		} else {
			break
		}
	}
	return fileTotalSize, nil
}

func (m *MinioHelper) CopyDir(sourcePath, targetPath string, filterSuffix []string) error {
log.Info("CopyByPath sourcePath=%s,targetPath=%s", sourcePath, targetPath)
allFiles, _ := m.GetAllObjectsUnderDir(sourcePath)
@@ -350,6 +387,10 @@ func (m *MinioHelper) DeleteDir(dirPath string) (remainingFiles bool, err error)
return false, err
}

// DeleteCollection deletes everything under dirPath. Minio has no collection
// concept, so this simply delegates to DeleteDir.
func (m *MinioHelper) DeleteCollection(dirPath string) (remainingFiles bool, err error) {
	return m.DeleteDir(dirPath)
}

func (m *MinioHelper) DeleteFile(filePath string) error {
filePath = m.TrimBucketPrefix(filePath)

@@ -428,3 +469,7 @@ func (m *MinioHelper) PutString(objectKey string, content string) error {
func (m *MinioHelper) GetOutputObjectKeyPrefix(jobName string, computeResource string, versionName string) string {
return m.GetJobDefaultObjectKeyPrefix(jobName) + "/model/"
}

// ReportSizeChanged is a no-op for Minio: there is no external service to
// notify about storage size changes.
func (m *MinioHelper) ReportSizeChanged(objectKey string) error {
	return nil
}

+ 46
- 0
services/ai_task_service/storage_helper/obs.go View File

@@ -19,6 +19,10 @@ type OBSHelper struct {
Bucket string
}

// AllocateDatasetNamespace returns the caller-computed prefix unchanged:
// OBS needs no server-side namespace allocation (unlike Urchin V2, which
// creates a collection and returns its ID).
func (m *OBSHelper) AllocateDatasetNamespace(name, prefix string) (string, error) {
	return prefix, nil
}

func (m *OBSHelper) UploadDir(codePath, objectKeyPrefix string) error {
objectKeyPrefix = m.TrimBucketPrefix(objectKeyPrefix)

@@ -52,6 +56,11 @@ func (m *OBSHelper) GetBucket() string {
}
return setting.Bucket
}

// GetDataId returns "": OBS storage has no collection/data-ID concept.
func (m *OBSHelper) GetDataId(path string) string {
	return ""
}

func (m *OBSHelper) MKDIR(path string, description ...string) error {
path = m.TrimBucketPrefix(path)
path = strings.TrimSuffix(path, "/") + "/"
@@ -126,6 +135,7 @@ func (m *OBSHelper) GetOneLevelObjectsUnderDirWithMarker(dirPath string, marker
Size: val.Size,
IsDir: false,
RelativePath: strings.TrimSuffix(dirPath, "/") + "/" + fileName,
FullPath: val.Key,
}
fileInfos = append(fileInfos, fileInfo)
}
@@ -190,6 +200,7 @@ func (m *OBSHelper) GetAllObjectsUnderDir(prefix string, maxKeyArray ...int) ([]
IsDir: isDir,
ParenDir: "",
RelativePath: val.Key,
FullPath: val.Key,
}
fileInfoList = append(fileInfoList, fileInfo)
}
@@ -253,6 +264,7 @@ func (m *OBSHelper) GetAllObjectsUnderDirWithMarker(prefix string, marker string
IsDir: isDir,
ParenDir: "",
RelativePath: val.Key,
FullPath: val.Key,
}
fileInfoList = append(fileInfoList, fileInfo)
}
@@ -382,6 +394,32 @@ func (m *OBSHelper) CopyFile(sourcePath, targetPath string) error {
return nil
}

// CountDirSize sums the sizes of all objects under objectKey, paging through
// the listing with markers (1000 entries per page).
func (m *OBSHelper) CountDirSize(objectKey string) (int64, error) {
	var fileTotalSize int64
	index := 1
	marker := ""
	for {
		output, err := m.GetAllObjectsUnderDirWithMarker(objectKey, marker, 1000)
		if err != nil {
			log.Error("CountDirSize: GetAllObjectsUnderDirWithMarker error.dirPath=%s err=%v", objectKey, err)
			return 0, err
		}
		for i := 0; i < len(output.Objects); i++ {
			fileTotalSize += output.Objects[i].Size
		}

		// Fix: log message typo ("CountD:irSize").
		log.Info("CountDirSize GetAllObjectsUnderDirWithMarker Page:%d\n", index)
		index++

		if output.IsTruncated {
			marker = output.NextMarker
		} else {
			break
		}
	}
	return fileTotalSize, nil
}

func (m *OBSHelper) DeleteDir(dirPath string) (remainingFiles bool, err error) {
dirPath = m.TrimBucketPrefix(dirPath)
log.Info("DeleteDir filePath=%s", dirPath)
@@ -427,6 +465,10 @@ func (m *OBSHelper) DeleteDir(dirPath string) (remainingFiles bool, err error) {
return remainingFiles, nil
}

// DeleteCollection deletes everything under dirPath. OBS has no collection
// concept, so this simply delegates to DeleteDir.
func (m *OBSHelper) DeleteCollection(dirPath string) (remainingFiles bool, err error) {
	return m.DeleteDir(dirPath)
}

func (m *OBSHelper) DeleteFile(filePath string) error {
filePath = m.TrimBucketPrefix(filePath)

@@ -538,3 +580,7 @@ func (m *OBSHelper) GetOutputObjectKeyPrefix(jobName string, computeResource str
}
return objectkey
}

// ReportSizeChanged is a no-op for OBS: there is no external service to
// notify about storage size changes.
func (m *OBSHelper) ReportSizeChanged(objectKey string) error {
	return nil
}

+ 545
- 0
services/ai_task_service/storage_helper/urchin_v2.go View File

@@ -0,0 +1,545 @@
package storage_helper

import (
"fmt"
"io"
"path"
"strconv"
"strings"

"code.gitea.io/gitea/modules/cloudbrain"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/obs"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/storage"
)

// UrchinV2Helper accesses objects stored in the Urchin V2 service. Object
// keys are addressed as "<collectionID>/<relativePath>".
type UrchinV2Helper struct {
	Bucket string
}

// splitPath splits an object key of the form "collectionID/relativePath"
// into its two components. Leading and trailing slashes are ignored; a key
// containing no slash is a bare collection ID with an empty relative path.
func (c *UrchinV2Helper) splitPath(p string) (collectionID, relativePath string) {
	parts := strings.SplitN(strings.Trim(p, "/"), "/", 2)
	if len(parts) < 2 {
		return parts[0], ""
	}
	return parts[0], parts[1]
}

// AllocateDatasetNamespace creates a new Urchin collection named after the
// dataset/model and returns its ID, which becomes the storage path prefix.
// The caller-computed prefix is ignored for Urchin V2.
func (m *UrchinV2Helper) AllocateDatasetNamespace(name, prefix string) (string, error) {
	return storage.UrchinClient.CreateCollection(name)
}

// UploadDir uploads the local directory localPath under the collection and
// relative path encoded in objectKeyPrefix.
func (m *UrchinV2Helper) UploadDir(localPath, objectKeyPrefix string) error {
	collectionId, relativePath := m.splitPath(objectKeyPrefix)
	return storage.UrchinClient.UploadLocalDir(collectionId, localPath, relativePath)
}

// UploadFile streams r to the object addressed by objectKey
// ("collectionID/relativePath").
func (m *UrchinV2Helper) UploadFile(objectKey string, r io.Reader) error {
	collectionId, relativePath := m.splitPath(objectKey)
	return storage.UrchinClient.UploadFile(collectionId, relativePath, r)
}

// GetJobDefaultObjectKeyPrefix returns the default object-key prefix for a
// job's files: <CodePathPrefix>/<jobName>.
func (m *UrchinV2Helper) GetJobDefaultObjectKeyPrefix(jobName string) string {
	return path.Join(setting.CodePathPrefix, jobName)
}

// GetRealPath is not applicable to Urchin V2 storage; it always returns "".
func (m *UrchinV2Helper) GetRealPath(objectKey string) string {
	return ""
}

// GetBucket returns "": Urchin V2 addresses objects by collection ID, not bucket.
func (m *UrchinV2Helper) GetBucket() string {
	return ""
}

// MKDIR creates a directory marker at path ("collectionID/relativePath").
// The optional description is ignored for Urchin V2.
func (m *UrchinV2Helper) MKDIR(path string, description ...string) error {
	collectionId, relativePath := m.splitPath(path)
	return storage.UrchinClient.MKDIR(collectionId, relativePath)
}

// GetEndpoint returns "": Urchin V2 exposes no single storage endpoint.
func (m *UrchinV2Helper) GetEndpoint() string {
	return ""
}

// GetDataId returns path unchanged; for Urchin V2 the leading path segment
// is the collection (data) ID.
func (m *UrchinV2Helper) GetDataId(path string) string {
	return path
}

// GetOneLevelObjectsUnderDir lists the immediate children (files and
// sub-directories) of dirPath, paging through results until the listing is
// no longer truncated.
//
// maxKeyArray optionally caps the page size; when omitted the default
// setting.OUTPUT_SHOW_MAX_KEY+1 is used.
func (m *UrchinV2Helper) GetOneLevelObjectsUnderDir(dirPath string, maxKeyArray ...int) ([]storage.FileInfo, error) {
	var maxKey int
	if len(maxKeyArray) <= 0 {
		maxKey = setting.OUTPUT_SHOW_MAX_KEY + 1
	} else {
		// Bug fix: a caller-supplied page size was previously ignored
		// (maxKey stayed 0 and fell back to the default downstream).
		maxKey = maxKeyArray[0]
	}

	index := 1
	fileInfoList := make([]storage.FileInfo, 0)
	marker := ""
	for {
		output, err := m.GetOneLevelObjectsUnderDirWithMarker(dirPath, marker, maxKey)
		if err != nil {
			return nil, err
		}
		fileInfoList = append(fileInfoList, output.Objects...)

		log.Info("GetOneLevelObjectsUnderDir Page:%d\n", index)
		index++

		if output.IsTruncated {
			marker = output.NextMarker
		} else {
			break
		}
	}

	return fileInfoList, nil
}

// GetOneLevelObjectsUnderDirWithMarker fetches one page of the immediate
// children of dirPath, starting after marker. Directories come from the
// listing's common prefixes, files from the object list; the placeholder
// entry for the directory itself (key == relativePath) is skipped.
func (m *UrchinV2Helper) GetOneLevelObjectsUnderDirWithMarker(dirPath string, marker string, maxKey int) (*ObjectListResponse, error) {
	collectionId, relativePath := m.splitPath(dirPath)

	if maxKey <= 0 {
		maxKey = setting.OUTPUT_SHOW_MAX_KEY
	}
	// A trailing slash makes the prefix match only entries below the directory.
	if relativePath != "" && !strings.HasSuffix(relativePath, "/") {
		relativePath = relativePath + "/"
	}
	res, err := storage.UrchinClient.ListPrefixObjectsWithMarkerAndDelimeter(collectionId, relativePath, marker, int32(maxKey))
	if err != nil {
		return nil, err
	}
	if res == nil {
		return &ObjectListResponse{}, nil
	}
	files := make([]storage.FileInfo, 0, len(res.Data.List))
	// Common prefixes are the one-level sub-directories.
	for _, val := range res.Data.CommonPrefixes {
		fileName := strings.TrimSuffix(strings.TrimPrefix(val, relativePath), "/")
		// NOTE(review): directory entries carry only the name and IsDir flag —
		// no size, mod-time, or path fields.
		file := storage.FileInfo{
			FileName: fileName,
			IsDir:    true,
		}
		files = append(files, file)
	}
	for i := 0; i < len(res.Data.List); i++ {
		val := res.Data.List[i]
		// Skip the placeholder object representing the directory itself.
		if val.Key == relativePath {
			continue
		}
		// Size is transported as a string; unparsable values fall back to 0.
		sizeStr := val.Size
		var size int64 = 0
		if sizeStr != "" {
			sizeTmp, err := strconv.ParseInt(sizeStr, 10, 64)
			if err == nil {
				size = sizeTmp
			}
		}
		file := storage.FileInfo{
			ModTime:      val.LastModified.Local().Format("2006-01-02 15:04:05"),
			FileName:     strings.TrimPrefix(val.Key[len(relativePath):], "/"),
			Size:         size,
			IsDir:        false,
			ParenDir:     "",
			RelativePath: val.Key,
			FullPath:     path.Join(collectionId, strings.TrimPrefix(val.Key, collectionId)),
		}
		files = append(files, file)
	}

	return &ObjectListResponse{
		NextMarker:  res.Data.NextMarker,
		IsTruncated: res.Data.IsTruncated,
		Objects:     files,
	}, nil

}

// GetAllObjectsUnderDir lists every object below prefix (recursively),
// following pagination markers until the listing is no longer truncated.
func (m *UrchinV2Helper) GetAllObjectsUnderDir(prefix string, maxKeyArray ...int) ([]storage.FileInfo, error) {
	all := make([]storage.FileInfo, 0)
	page := 1
	for marker := ""; ; page++ {
		res, err := m.GetAllObjectsUnderDirWithMarker(prefix, marker, maxKeyArray...)
		if err != nil {
			return nil, err
		}
		all = append(all, res.Objects...)

		log.Info("GetAllObjectsUnderDir Page:%d\n", page)

		if !res.IsTruncated {
			return all, nil
		}
		marker = res.NextMarker
	}
}

// GetAllObjectsUnderDirWithMarker fetches one page of the recursive listing
// under prefix, starting after marker. Entries whose key ends in "/" are
// reported as directories.
func (m *UrchinV2Helper) GetAllObjectsUnderDirWithMarker(prefix string, marker string, maxKeyArray ...int) (*ObjectListResponse, error) {
	collectionId, relativePath := m.splitPath(prefix)

	// Page size defaults to 1000 when not supplied.
	var maxKey int
	if len(maxKeyArray) <= 0 {
		maxKey = 1000
	} else {
		maxKey = maxKeyArray[0]
	}
	res, err := storage.UrchinClient.ListPrefixObjectsWithMarker(collectionId, marker, relativePath, int32(maxKey))
	if err != nil {
		return nil, err
	}
	// NOTE(review): an empty page yields a zero ObjectListResponse, dropping
	// NextMarker/IsTruncated — assumes the service never returns an empty
	// truncated page; confirm.
	if res == nil || len(res.Data.List) == 0 {
		return &ObjectListResponse{}, nil
	}
	files := make([]storage.FileInfo, 0, len(res.Data.List))

	for i := 0; i < len(res.Data.List); i++ {
		val := res.Data.List[i]
		// Skip the placeholder object representing the directory itself.
		if val.Key == relativePath {
			continue
		}
		// Size is transported as a string; unparsable values fall back to 0.
		sizeStr := val.Size
		var size int64 = 0
		if sizeStr != "" {
			sizeTmp, err := strconv.ParseInt(sizeStr, 10, 64)
			if err == nil {
				size = sizeTmp
			}
		}
		// Keys ending in "/" are directory markers.
		var isDir bool
		if strings.HasSuffix(val.Key, "/") {
			isDir = true
		} else {
			isDir = false
		}
		file := storage.FileInfo{
			ModTime:      val.LastModified.Local().Format("2006-01-02 15:04:05"),
			FileName:     strings.TrimPrefix(val.Key[len(relativePath):], "/"),
			Size:         size,
			IsDir:        isDir,
			ParenDir:     "",
			RelativePath: val.Key,
			FullPath:     path.Join(collectionId, strings.TrimPrefix(val.Key, collectionId)),
		}
		files = append(files, file)
	}

	return &ObjectListResponse{
		NextMarker:  res.Data.NextMarker,
		IsTruncated: res.Data.IsTruncated,
		Objects:     files,
	}, nil
}

// TrimBucketPrefix is a no-op for Urchin V2: paths start with the collection
// ID rather than a bucket name, so there is no bucket prefix to strip.
func (m *UrchinV2Helper) TrimBucketPrefix(path string) string {
	return path
}

// OpenFile returns a reader over the object at path
// ("collectionID/relativePath"). The caller must close the returned body.
func (m *UrchinV2Helper) OpenFile(path string) (io.ReadCloser, error) {
	collectionId, relativePath := m.splitPath(path)
	res, err := storage.UrchinClient.GetObject(collectionId, relativePath)
	if err != nil {
		return nil, err
	}
	return res.Body, nil
}

// GetObject fetches the object at path and returns its content length,
// content type, and body stream. The caller must close Body.
func (m *UrchinV2Helper) GetObject(path string) (*ObjectResponse, error) {
	collectionId, relativePath := m.splitPath(path)

	res, err := storage.UrchinClient.GetObject(collectionId, relativePath)
	if err != nil {
		return nil, err
	}
	return &ObjectResponse{
		ContentLength: res.ContentLength,
		ContentType:   res.ContentType,
		Body:          res.Body,
	}, nil
}

// GetObjectMeta fetches only the object's metadata (no body).
func (m *UrchinV2Helper) GetObjectMeta(path string) (*ObjectMeta, error) {
	collectionId, relativePath := m.splitPath(path)

	res, err := storage.UrchinClient.GetObjectMeta(collectionId, relativePath)
	if err != nil {
		return nil, err
	}
	return &ObjectMeta{
		ContentLength: res.ContentLength,
		ContentType:   res.ContentType,
		LastModified:  res.LastModified,
	}, nil
}

// GetSignedDownloadUrl returns a pre-signed download URL for the object at
// path ("collectionID/relativePath").
func (m *UrchinV2Helper) GetSignedDownloadUrl(path string) (string, error) {
	collectionID, relativePath := m.splitPath(path)
	return storage.UrchinClient.GetObjectSignedUrl(collectionID, relativePath)
}

// GetS3DownloadUrl is not supported for Urchin V2 storage; it always returns "".
func (m *UrchinV2Helper) GetS3DownloadUrl(path string) string {
	return ""
}

// CopyDir copies every object under sourcePath to targetPath, skipping files
// whose names match filterSuffix. The listing is paged via markers; the first
// copy failure aborts the whole operation.
func (m *UrchinV2Helper) CopyDir(sourcePath, targetPath string, filterSuffix []string) error {
	sourceCollectionId, sourceRelativePath := m.splitPath(sourcePath)
	targetCollectionId, targetRelativePath := m.splitPath(targetPath)

	index := 1
	marker := ""
	for {
		output, err := m.GetAllObjectsUnderDirWithMarker(sourcePath, marker, 1000)
		if err != nil || output == nil {
			// Fix: log message typo ("ourcePath").
			log.Error("CopyDir GetAllObjectsUnderDirWithMarker error.sourcePath=%s targetPath=%s err=%v", sourcePath, targetPath, err)
			return err
		}
		for i := 0; i < len(output.Objects); i++ {
			file := output.Objects[i]
			if isMatchSuffix(file.FileName, filterSuffix) {
				continue
			}
			// NOTE(review): RelativePath from the listing is the full key
			// within the collection; joining it with sourceRelativePath may
			// duplicate the prefix — confirm against UrchinClient key layout.
			sourceFilePath := path.Join(sourceRelativePath, file.RelativePath)
			targetFilePath := path.Join(targetRelativePath, file.RelativePath)
			err := storage.UrchinClient.CopyObject(sourceCollectionId, sourceFilePath, targetCollectionId, targetFilePath)
			if err != nil {
				log.Error("CopyDir CopyObject error.sourcePath=%s targetPath=%s err=%v", sourcePath, targetPath, err)
				return err
			}
		}

		log.Info("CopyDir GetAllObjectsUnderDirWithMarker Page:%d\n", index)
		index++

		if output.IsTruncated {
			marker = output.NextMarker
		} else {
			break
		}
	}
	return nil
}

// CopyFile copies a single object from sourcePath to targetPath; the two may
// live in different collections.
func (m *UrchinV2Helper) CopyFile(sourcePath, targetPath string) error {
	srcCol, srcRel := m.splitPath(sourcePath)
	dstCol, dstRel := m.splitPath(targetPath)
	return storage.UrchinClient.CopyObject(srcCol, srcRel, dstCol, dstRel)
}

// DeleteDir deletes every object under dirPath, paging through the listing.
// remainingFiles reports whether any object could not be deleted; individual
// delete failures are logged and skipped rather than aborting the walk.
func (m *UrchinV2Helper) DeleteDir(dirPath string) (remainingFiles bool, err error) {
	collectionId, relativePath := m.splitPath(dirPath)

	index := 1
	marker := ""
	for {
		output, err := m.GetAllObjectsUnderDirWithMarker(dirPath, marker, 1000)
		if err != nil {
			log.Error("DeleteDir GetAllObjectsUnderDirWithMarker error.dirPath=%s err=%v", dirPath, err)
			return true, err
		}
		for i := 0; i < len(output.Objects); i++ {
			file := output.Objects[i]
			// NOTE(review): RelativePath is already the full key within the
			// collection; joining with relativePath may duplicate the prefix —
			// confirm against UrchinClient key layout.
			filePath := path.Join(relativePath, file.RelativePath)
			err := storage.UrchinClient.DeleteFile(collectionId, filePath)
			if err != nil {
				log.Error("DeleteDir: delete file error.collectionId=%s filePath=%s err=%v", collectionId, filePath, err)
				remainingFiles = true
				continue
			}
		}

		log.Info("DeleteDir GetAllObjectsUnderDirWithMarker Page:%d\n", index)
		index++

		if output.IsTruncated {
			marker = output.NextMarker
		} else {
			break
		}
	}
	// Bug fix: previously returned a hard-coded false here, losing the
	// remainingFiles flag set when individual deletes failed (callers rely on
	// it to record storage-delete failures for later retry).
	return remainingFiles, nil
}

// DeleteCollection deletes the entire Urchin collection addressed by dirPath;
// only the leading collection-ID segment of dirPath is used.
func (m *UrchinV2Helper) DeleteCollection(dirPath string) (remainingFiles bool, err error) {
	collectionId, _ := m.splitPath(dirPath)

	err = storage.UrchinClient.DeleteObject(collectionId)
	if err != nil {
		log.Error("DeleteCollection: delete object error.collectionId=%s err=%v", collectionId, err)
		return true, err
	}
	return false, nil
}

// DeleteFile deletes a single object at path ("collectionID/relativePath").
func (m *UrchinV2Helper) DeleteFile(path string) error {
	collectionId, relativePath := m.splitPath(path)

	return storage.UrchinClient.DeleteFile(collectionId, relativePath)
}

// HasObject reports whether the object at path exists.
// NOTE(review): any metadata error (including transient network failures) is
// treated as "not found" and swallowed — confirm this best-effort behavior
// is intended.
func (m *UrchinV2Helper) HasObject(path string) (bool, error) {
	collectionId, relativePath := m.splitPath(path)

	res, err := storage.UrchinClient.GetObjectMeta(collectionId, relativePath)
	if err != nil || res == nil {
		return false, nil
	}
	return true, nil
}

// GetPartInfos lists the already-uploaded parts of multipart upload uploadID
// for objectName, following PartNumberMarker pagination.
//
// NOTE(review): this implementation talks to storage.ObsCli (not
// storage.UrchinClient) and passes m.GetBucket(), which returns "" for
// Urchin V2 — confirm whether part listing is intentionally backed by OBS.
func (m *UrchinV2Helper) GetPartInfos(objectName string, uploadID string) ([]PartInfo, error) {
	result := make([]PartInfo, 0)

	output := &obs.ListPartsOutput{}
	partNumberMarker := 0
	for {
		temp, err := storage.ObsCli.ListParts(&obs.ListPartsInput{
			Bucket:           m.GetBucket(),
			Key:              objectName,
			UploadId:         uploadID,
			MaxParts:         MAX_LIST_PARTS,
			PartNumberMarker: partNumberMarker,
		})
		if err != nil {
			// Bug fix: the message had no format verb for the error argument.
			log.Error("ListParts failed: %v", err)
			return nil, err
		}

		partNumberMarker = temp.NextPartNumberMarker
		log.Info("uuid:%s, MaxParts:%d, PartNumberMarker:%d, NextPartNumberMarker:%d, len:%d", objectName, temp.MaxParts, temp.PartNumberMarker, temp.NextPartNumberMarker, len(temp.Parts))

		for _, partInfo := range temp.Parts {
			// Bug fix: Size and LastModified were not copied here, so the
			// returned PartInfo values were always zero.
			output.Parts = append(output.Parts, obs.Part{
				PartNumber:   partInfo.PartNumber,
				ETag:         partInfo.ETag,
				Size:         partInfo.Size,
				LastModified: partInfo.LastModified,
			})
		}

		if !temp.IsTruncated {
			break
		}
	}
	for _, part := range output.Parts {
		result = append(result, PartInfo{
			PartNumber:   part.PartNumber,
			ETag:         part.ETag,
			Size:         part.Size,
			LastModified: part.LastModified,
		})
	}
	return result, nil
}

// NewMultiPartUpload initiates a multipart upload for objectName and returns
// the upload ID.
func (m *UrchinV2Helper) NewMultiPartUpload(objectName string) (string, error) {
	collectionId, relativePath := m.splitPath(objectName)
	return storage.UrchinClient.InitiateMultipartUpload(collectionId, relativePath)
}

// GenMultiPartSignedUrl returns a pre-signed URL for uploading one part.
// partSize is accepted for interface compatibility but unused by Urchin V2.
func (m *UrchinV2Helper) GenMultiPartSignedUrl(objectName string, uploadId string, partNumber int, partSize int64) (string, error) {
	collectionId, relativePath := m.splitPath(objectName)
	return storage.UrchinClient.CreateMultipartUploadSignedUrl(collectionId, uploadId, relativePath, partNumber)
}

// GenSignedUrl returns a pre-signed URL for a single-request PUT of objectKey.
func (m *UrchinV2Helper) GenSignedUrl(objectKey string) (string, error) {
	collectionId, relativePath := m.splitPath(objectKey)
	return storage.UrchinClient.CreatePutObjectSignedUrl(collectionId, relativePath)

}

// CompleteMultiPartUpload finalizes the multipart upload. The string result
// is always "" for Urchin V2 (no ETag is surfaced).
func (m *UrchinV2Helper) CompleteMultiPartUpload(objectName string, uploadID string, totalChunks int) (string, error) {
	collectionId, relativePath := m.splitPath(objectName)
	return "", storage.UrchinClient.CompleteMultiPartUpload(collectionId, uploadID, relativePath, totalChunks)
}

// GetAllObjectByBucketAndPrefix is not implemented for Urchin V2; it always
// returns (nil, nil).
func (m *UrchinV2Helper) GetAllObjectByBucketAndPrefix(bucket string, prefix string) ([]storage.FileInfo, error) {
	return nil, nil

}

// GetFilesSize sums the ContentLength of each listed object key; files whose
// metadata cannot be fetched are logged and skipped (counted as 0).
func (m *UrchinV2Helper) GetFilesSize(Files []string) int64 {
	var fileTotalSize int64
	for _, file := range Files {
		collectionId, relativePath := m.splitPath(file)
		out, err := storage.UrchinClient.GetObjectMeta(collectionId, relativePath)
		if err != nil {
			log.Info("Get File error, error=" + err.Error())
			continue
		}
		fileTotalSize += out.ContentLength
	}
	return fileTotalSize

}

// RemoveObject deletes path recursively via DeleteDir and converts a partial
// failure (files left behind) into an error.
func (m *UrchinV2Helper) RemoveObject(path string) error {
	remainingFiles, err := m.DeleteDir(path)
	if err != nil {
		return err
	}
	if remainingFiles {
		return fmt.Errorf("Some files failed to be deleted.")
	}
	return nil
}

// PutString writes content as the full body of the object at objectKey.
func (m *UrchinV2Helper) PutString(objectKey string, content string) error {
	return m.UploadFile(objectKey, strings.NewReader(content))

}

// GetOutputObjectKeyPrefix returns the object-key prefix for a job's output:
// under setting.OutPutPath for NPU tasks, under the model mount path for all
// other compute resources. The result always ends with "/".
func (m *UrchinV2Helper) GetOutputObjectKeyPrefix(jobName string, computeResource string, versionName string) string {
	objectkey := path.Join(m.GetJobDefaultObjectKeyPrefix(jobName), setting.OutPutPath, versionName) + "/"
	if computeResource != "NPU" {
		objectkey = path.Join(m.GetJobDefaultObjectKeyPrefix(jobName), cloudbrain.ModelMountPath, versionName) + "/"
	}
	return objectkey
}

// CountDirSize sums the sizes of all objects under objectKey, paging through
// the listing with markers (1000 entries per page).
func (m *UrchinV2Helper) CountDirSize(objectKey string) (int64, error) {
	var fileTotalSize int64
	index := 1
	marker := ""
	for {
		output, err := m.GetAllObjectsUnderDirWithMarker(objectKey, marker, 1000)
		if err != nil {
			log.Error("CountDirSize: GetAllObjectsUnderDirWithMarker error.dirPath=%s err=%v", objectKey, err)
			return 0, err
		}
		for i := 0; i < len(output.Objects); i++ {
			fileTotalSize += output.Objects[i].Size
		}

		// Fix: log message typo ("CountD:irSize").
		log.Info("CountDirSize GetAllObjectsUnderDirWithMarker Page:%d\n", index)
		index++

		if output.IsTruncated {
			marker = output.NextMarker
		} else {
			break
		}
	}
	return fileTotalSize, nil
}

// ReportSizeChanged notifies the Urchin service that the collection
// containing objectKey changed size, then invalidates its deployment cache.
// The error from the size report is returned; the cache-invalidation error
// is only logged.
func (m *UrchinV2Helper) ReportSizeChanged(objectKey string) error {
	collectionId, _ := m.splitPath(objectKey)
	err := storage.UrchinClient.ReportSizeChanged(collectionId)
	if err != nil {
		// Bug fix: objectKey was missing from the argument list, so the two
		// format verbs consumed the wrong values.
		log.Error("ReportSizeChanged err. objectKey=%s err=%v", objectKey, err)
	}
	err2 := storage.UrchinClient.DeleteObjectDeployment(collectionId)
	if err2 != nil {
		// Bug fix: same missing argument, plus a copy-pasted message that
		// misattributed the failure to ReportSizeChanged.
		log.Error("DeleteObjectDeployment err. objectKey=%s err=%v", objectKey, err2)
	}
	return err
}

+ 39
- 23
services/subject_service/aimodel_service.go View File

@@ -1,6 +1,13 @@
package subject_service

import (
"encoding/json"
"errors"
"fmt"
"path"
"strings"
"time"

"code.gitea.io/gitea/entity"
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/log"
@@ -11,13 +18,7 @@ import (
"code.gitea.io/gitea/services/ai_task_service/storage_helper"
"code.gitea.io/gitea/services/cloudbrain/resource"
"code.gitea.io/gitea/services/storage_limit"
"encoding/json"
"errors"
"fmt"
uuid "github.com/satori/go.uuid"
"path"
"strings"
"time"
)

func CreateAimodel(doer, owner *models.User, req entity.CreateAimodelReq) (*models.AiModelManage, *response.BizError) {
@@ -111,6 +112,13 @@ func CreateAimodel(doer, owner *models.User, req entity.CreateAimodelReq) (*mode
}

aimodelId := uuid.NewV4().String()
prefix := path.Join(stroageHelper.GetBucket(), uploaderHelper.GetStoragePathPrefix(aimodelId)) + "/"
path, err := stroageHelper.AllocateDatasetNamespace(req.Name, prefix)
if err != nil {
log.Error("AllocateDatasetNamespace failed, prefix=%s, err=%v", req.Name, err)
return nil, response.NewBizError(err)
}

aimodelDTO := &models.AiModelManage{
ID: aimodelId,
Name: req.Name,
@@ -125,7 +133,7 @@ func CreateAimodel(doer, owner *models.User, req entity.CreateAimodelReq) (*mode
Engine: req.Engine,
IsPrivate: req.IsPrivate,
StorageType: string(storageType),
Path: path.Join(stroageHelper.GetBucket(), uploaderHelper.GetStoragePathPrefix(aimodelId)) + "/",
Path: path,
ExternalName: req.ExternalName,
TrainTaskInfo: trainTaskInfo,
Status: AimodelStatus,
@@ -146,23 +154,30 @@ func DeleteAimodel(doer *models.User, aimodel *models.AiModelManage) error {
return err
}

err = models.DeleteAimodel(doer, aimodel.OwnerID, aimodel.ID)
if err != nil {
log.Error("DeleteAimodel err,dataset=%+v,err=%v", aimodel, err)
}
go DeleteAimodelStorage(aimodel)
notification.NotifyDeleteAimodel(doer, owner, aimodel)

// delete hf transfer file record db
if aimodel.ModelType == models.MODEL_HF_TYPE {
err := models.DeleteHfFilesByModelId(aimodel.ID)
err = models.WithTx(func(ctx models.DBContext) error {
err := models.DeleteAimodel(ctx, doer, aimodel.OwnerID, aimodel.ID)
if err != nil {
log.Info("DeleteHfFilesByModelId error." + err.Error())
log.Error("DeleteAimodel err,dataset=%+v,err=%v", aimodel, err)
return err
}
}
err = DeleteAimodelStorage(aimodel)
if err != nil {
log.Error("DeleteAimodel DeleteAimodelStorage err,aimodel=%+v,err=%v", aimodel, err)
return err
}
// delete hf transfer file record db
if aimodel.ModelType == models.MODEL_HF_TYPE {
err := models.DeleteHfFilesByModelIdWithContext(ctx, aimodel.ID)
if err != nil {
log.Info("DeleteHfFilesByModelId error." + err.Error())
return err
}
}
notification.NotifyDeleteAimodel(doer, owner, aimodel)
return nil
})

return nil
return err
}

func DeleteAimodelStorage(aimodel *models.AiModelManage) error {
@@ -172,16 +187,17 @@ func DeleteAimodelStorage(aimodel *models.AiModelManage) error {
log.Error("PANIC:%v", combinedErr)
}
}()

helper := storage_helper.SelectStorageHelperFromStorageType(entity.StorageType(aimodel.StorageType))
if helper == nil {
log.Error("GetUploadHelper failed, dataType=%d", models.AimodelSubject)
return errors.New("Storage type error")
}
remainingFiles, err := helper.DeleteDir(aimodel.Path)
remainingFiles, err := helper.DeleteCollection(aimodel.Path)
if err == nil && remainingFiles {
tmpErr := models.InsertStorageDeleteFailedAimodel(aimodel)
if tmpErr != nil {
log.Error("InsertStorageDeleteFailedDataset failed, datasetId=%s, datasetName=%s err=%v", aimodel.ID, aimodel.Name, tmpErr)
log.Error("InsertStorageDeleteFailedAimodel failed, aimodelId=%s, datasetName=%s err=%v", aimodel.ID, aimodel.Name, tmpErr)
}
}
return err
@@ -287,7 +303,6 @@ func PutAimodelReadme(req entity.AimodelReadmeReq) error {
}
path := path.Join(aimodel.Path, README_FILE_NAME)
err := storageHelper.UploadFile(path, strings.NewReader(req.Content))
DoAfterAimodelFileChanged(aimodel.ID)
return err
}

@@ -410,6 +425,7 @@ func UpdateAimodelSize(aimodelId string) {
for _, file := range files {
size += file.Size
}
helper.ReportSizeChanged(aimodel.Path)
models.UpdateAimodelBySize(aimodelId, size)
}



+ 7
- 1
services/subject_service/aimodel_uploader_helper.go View File

@@ -48,7 +48,13 @@ func (h *AimodelUploaderHelper) GetStorageType() entity.StorageType {
if h.Aimodel != nil {
return entity.StorageType(h.Aimodel.StorageType)
}
return entity.StorageType(strings.ToUpper(setting.StorageDefaultType))
var storageType string
if setting.AIMODEL_STORAGE_TYPE != "" {
storageType = setting.AIMODEL_STORAGE_TYPE
} else {
storageType = setting.StorageDefaultType
}
return entity.StorageType(strings.ToUpper(storageType))
}

func (h *AimodelUploaderHelper) DoAfterUploadedSuccess(dataID string) error {


+ 27
- 40
services/subject_service/dataset_service.go View File

@@ -16,7 +16,6 @@ import (
"code.gitea.io/gitea/modules/redis/redis_client"
"code.gitea.io/gitea/modules/redis/redis_key"
"code.gitea.io/gitea/modules/redis/redis_lock"
"code.gitea.io/gitea/modules/storage"
"code.gitea.io/gitea/routers/response"
"code.gitea.io/gitea/services/ai_task_service/storage_helper"
"code.gitea.io/gitea/services/storage_limit"
@@ -91,6 +90,13 @@ func CreateDataset(doer, owner *models.User, req entity.CreateDatasetReq) (*mode

dataId := gouuid.NewV4().String()

prefix := path.Join(stroageHelper.GetBucket(), uploaderHelper.GetStoragePathPrefix(dataId)) + "/"
path, err := stroageHelper.AllocateDatasetNamespace(req.Name, prefix)
if err != nil {
log.Error("AllocateDatasetNamespace failed, prefix=%s, err=%v", req.Name, err)
return nil, response.NewBizError(err)
}

datasetDTO := &models.DatasetRegistry{
ID: dataId,
Name: req.Name,
@@ -104,13 +110,14 @@ func CreateDataset(doer, owner *models.User, req entity.CreateDatasetReq) (*mode
CreatorID: req.CreatorId,
OwnerID: req.OwnerId,
StorageType: string(storageType),
Path: path.Join(stroageHelper.GetBucket(), uploaderHelper.GetStoragePathPrefix(dataId)) + "/",
Path: path,
}
err = models.CreateDatasetRegistry(datasetDTO, doer)
if err != nil {
log.Error("CreateDatasetRegistry failed, name=%s, err=%v", req.Name, err)
return nil, response.NewBizError(err)
}

go notification.NotifyCreateDataset(doer, owner, datasetDTO)

return datasetDTO, nil
@@ -223,30 +230,6 @@ func GetDataset(id string) (*entity.DatasetInfo, error) {

const README_FILE_NAME = "README.md"

func GetDatasetFileList(datasetId string, parentDir string) ([]storage.FileInfo, error) {
dataset, err := models.GetDatasetRegistryByID(datasetId)
if err != nil {
log.Error("GetDatasetV2ByID failed, datasetId=%d err=%v", datasetId, err)
return nil, err
}
objectKey := path.Join(dataset.Path, parentDir)
helper := storage_helper.SelectStorageHelperFromStorageType(entity.StorageType(dataset.StorageType))
if helper == nil {
log.Error("SelectStorageHelperFromStorageType failed, StorageType=%d", dataset.StorageType)
return nil, errors.New("Storage type error")
}
fileList, err := helper.GetOneLevelObjectsUnderDir(objectKey)
if err != nil {
log.Error("GetOneLevelObjectsUnderDir failed, objectKey=%s err=%v", objectKey, err)
return nil, err
}

if fileList == nil {
fileList = make([]storage.FileInfo, 0)
}
return fileList, nil
}

func DeleteDatasetFile(dataset *models.DatasetRegistry, parentDir, fileName string) error {
objectKey := path.Join(dataset.Path, parentDir, fileName)
helper := storage_helper.SelectStorageHelperFromStorageType(entity.StorageType(dataset.StorageType))
@@ -269,15 +252,22 @@ func DeleteDataset(doer *models.User, dataset *models.DatasetRegistry) error {
log.Error("GetUserByID failed, OwnerID=%s err=%v", dataset.OwnerID, err)
return err
}
err = models.WithTx(func(ctx models.DBContext) error {
err := models.DeleteDatasetRegistry(ctx, doer, dataset.OwnerID, dataset.ID)
if err != nil {
log.Error("DeleteDataset err,dataset=%+v,err=%v", dataset, err)
return err
}
err = DeleteDatasetStorage(dataset)
if err != nil {
log.Error("DeleteDataset DeleteDatasetStorage err,dataset=%+v,err=%v", dataset, err)
return err
}
notification.NotifyDeleteDataset(doer, owner, dataset)
return nil
})

err = models.DeleteDatasetRegistry(doer, dataset.OwnerID, dataset.ID)
if err != nil {
log.Error("DeleteDataset err,dataset=%+v,err=%v", dataset, err)
}
go DeleteDatasetStorage(dataset)
notification.NotifyDeleteDataset(doer, owner, dataset)

return nil
return err
}

func DeleteDatasetStorage(dataset *models.DatasetRegistry) error {
@@ -292,7 +282,7 @@ func DeleteDatasetStorage(dataset *models.DatasetRegistry) error {
log.Error("GetUploadHelper failed, dataType=%d", models.DatasetSubject)
return errors.New("Storage type error")
}
remainingFiles, err := helper.DeleteDir(dataset.Path)
remainingFiles, err := helper.DeleteCollection(dataset.Path)
if err == nil && remainingFiles {
tmpErr := models.InsertStorageDeleteFailedDataset(dataset)
if tmpErr != nil {
@@ -366,15 +356,12 @@ func UpdateDatasetRegistrySize(datasetId string) {
log.Error("SelectStorageHelperFromStorageType failed, StorageType=%d", dataset.StorageType)
return
}
files, err := helper.GetAllObjectsUnderDir(dataset.Path)
size, err := helper.CountDirSize(dataset.Path)
if err != nil {
log.Error("Failed to query size . id=%s err=%v", datasetId, err)
return
}
var size int64
for _, file := range files {
size += file.Size
}
helper.ReportSizeChanged(dataset.Path)
models.UpdateDatasetRegistrySize(datasetId, size)
}



+ 7
- 1
services/subject_service/dataset_uploader_helper.go View File

@@ -49,7 +49,13 @@ func (h *DatasetUploaderHelper) GetStorageType() entity.StorageType {
if h.Dataset != nil {
return entity.StorageType(h.Dataset.StorageType)
}
return entity.StorageType(strings.ToUpper(setting.StorageDefaultType))
var storageType string
if setting.DATASET_STORAGE_TYPE != "" {
storageType = setting.DATASET_STORAGE_TYPE
} else {
storageType = setting.StorageDefaultType
}
return entity.StorageType(strings.ToUpper(storageType))
}

func (h *DatasetUploaderHelper) DoAfterUploadedSuccess(dataID string) error {


+ 6
- 10
services/subject_service/upload_service.go View File

@@ -82,12 +82,11 @@ func GetFileChunks(req entity.UploadChunkRequest) (*entity.UploadChunkResponse,
log.Error("UpdateFileChunk failed:%v", err)
}
}
fileNameUploaded := strings.Split(fileChunk.ObjectName, subjectId+"/")[1]
return &entity.UploadChunkResponse{
UUID: fileChunk.UUID,
Uploaded: fileChunk.IsUploaded,
Chunks: chunks,
FileName: fileNameUploaded,
FileName: fileName,
}, nil
}
if fileChunk.IsUploaded == models.DataFileNotUploaded {
@@ -153,13 +152,6 @@ func NewUploadMultipart(req entity.NewMultipartRequest) (*entity.NewMuiltipartRe

isLimited := storage_limit.IsSubjectFileUploadOverLimit(req.Size, req.SubjectAccessContext)
if isLimited {
if req.SubjectAccessContext.Owner.IsOrganization() {
if req.SubjectAccessContext.SubjectType == models.DatasetSubject {
return nil, errors.New("dataset_registry.org_not_allowed_to_upload_dataset_file")
} else {
return nil, errors.New("aimodel.org_not_allowed_to_upload_aimodel_file")
}
}
return nil, errors.New("common_error.file_size_storage_limit")
}

@@ -170,7 +162,10 @@ func NewUploadMultipart(req entity.NewMultipartRequest) (*entity.NewMuiltipartRe
uuid := util.UUID()
objectName := uploadHelper.GetFileStoragePath(req.FileName, req.SubjectId)
uploadID, err := storageHelper.NewMultiPartUpload(objectName)

if err != nil {
log.Error("NewMultiPartUpload err.req = %+v err=%v", req, err)
return nil, err
}
_, err = models.InsertUploadChunk(&models.UploadChunk{
UUID: uuid,
Md5: req.MD5,
@@ -308,5 +303,6 @@ func CompleteUpload(req entity.CompleteUploadRequest) error {
}
go uploadHelper.DoAfterUploadedSuccess(req.SubjectContext.SubjectID)
go UpdateFlowCacheAfterUploaded(int(req.SubjectContext.SubjectType), req.SubjectContext.SubjectID, req.FileNameList...)

return nil
}

+ 1
- 0
vendor/gitea.com/macaron/csrf/csrf.go View File

@@ -222,6 +222,7 @@ func Generate(options ...Options) macaron.Handler {
needsNew = true
}
}

if needsNew {
// FIXME: actionId.
x.Token = GenerateToken(x.Secret, x.ID, "POST")


+ 1
- 0
vendor/gitea.com/macaron/csrf/xsrf.go View File

@@ -90,6 +90,7 @@ func validTokenAtTime(token, key, userID, actionID string, now time.Time) bool {
}

expected := generateTokenAtTime(key, userID, actionID, issueTime)

// Check that the token matches the expected value.
// Use constant time comparison to avoid timing attacks.
return subtle.ConstantTimeCompare([]byte(token), []byte(expected)) == 1


+ 0
- 420
vendor/github.com/golang/mock/gomock/call.go View File

@@ -1,420 +0,0 @@
// Copyright 2010 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package gomock

import (
"fmt"
"reflect"
"strconv"
"strings"
)

// Call represents an expected call to a mock.
type Call struct {
t TestHelper // for triggering test failures on invalid call setup

receiver interface{} // the receiver of the method call
method string // the name of the method
methodType reflect.Type // the type of the method
args []Matcher // the args
origin string // file and line number of call setup

preReqs []*Call // prerequisite calls

// Expectations
minCalls, maxCalls int

numCalls int // actual number made

// actions are called when this Call is called. Each action gets the args and
// can set the return values by returning a non-nil slice. Actions run in the
// order they are created.
actions []func([]interface{}) []interface{}
}

// newCall creates a *Call. It requires the method type in order to support
// unexported methods.
func newCall(t TestHelper, receiver interface{}, method string, methodType reflect.Type, args ...interface{}) *Call {
t.Helper()

// TODO: check arity, types.
margs := make([]Matcher, len(args))
for i, arg := range args {
if m, ok := arg.(Matcher); ok {
margs[i] = m
} else if arg == nil {
// Handle nil specially so that passing a nil interface value
// will match the typed nils of concrete args.
margs[i] = Nil()
} else {
margs[i] = Eq(arg)
}
}

origin := callerInfo(3)
actions := []func([]interface{}) []interface{}{func([]interface{}) []interface{} {
// Synthesize the zero value for each of the return args' types.
rets := make([]interface{}, methodType.NumOut())
for i := 0; i < methodType.NumOut(); i++ {
rets[i] = reflect.Zero(methodType.Out(i)).Interface()
}
return rets
}}
return &Call{t: t, receiver: receiver, method: method, methodType: methodType,
args: margs, origin: origin, minCalls: 1, maxCalls: 1, actions: actions}
}

// AnyTimes allows the expectation to be called 0 or more times
func (c *Call) AnyTimes() *Call {
c.minCalls, c.maxCalls = 0, 1e8 // close enough to infinity
return c
}

// MinTimes requires the call to occur at least n times. If AnyTimes or MaxTimes have not been called, MinTimes also
// sets the maximum number of calls to infinity.
func (c *Call) MinTimes(n int) *Call {
c.minCalls = n
if c.maxCalls == 1 {
c.maxCalls = 1e8
}
return c
}

// MaxTimes limits the number of calls to n times. If AnyTimes or MinTimes have not been called, MaxTimes also
// sets the minimum number of calls to 0.
func (c *Call) MaxTimes(n int) *Call {
c.maxCalls = n
if c.minCalls == 1 {
c.minCalls = 0
}
return c
}

// DoAndReturn declares the action to run when the call is matched.
// The return values from this function are returned by the mocked function.
// It takes an interface{} argument to support n-arity functions.
func (c *Call) DoAndReturn(f interface{}) *Call {
// TODO: Check arity and types here, rather than dying badly elsewhere.
v := reflect.ValueOf(f)

c.addAction(func(args []interface{}) []interface{} {
vargs := make([]reflect.Value, len(args))
ft := v.Type()
for i := 0; i < len(args); i++ {
if args[i] != nil {
vargs[i] = reflect.ValueOf(args[i])
} else {
// Use the zero value for the arg.
vargs[i] = reflect.Zero(ft.In(i))
}
}
vrets := v.Call(vargs)
rets := make([]interface{}, len(vrets))
for i, ret := range vrets {
rets[i] = ret.Interface()
}
return rets
})
return c
}

// Do declares the action to run when the call is matched. The function's
// return values are ignored to retain backward compatibility. To use the
// return values call DoAndReturn.
// It takes an interface{} argument to support n-arity functions.
func (c *Call) Do(f interface{}) *Call {
// TODO: Check arity and types here, rather than dying badly elsewhere.
v := reflect.ValueOf(f)

c.addAction(func(args []interface{}) []interface{} {
vargs := make([]reflect.Value, len(args))
ft := v.Type()
for i := 0; i < len(args); i++ {
if args[i] != nil {
vargs[i] = reflect.ValueOf(args[i])
} else {
// Use the zero value for the arg.
vargs[i] = reflect.Zero(ft.In(i))
}
}
v.Call(vargs)
return nil
})
return c
}

// Return declares the values to be returned by the mocked function call.
func (c *Call) Return(rets ...interface{}) *Call {
c.t.Helper()

mt := c.methodType
if len(rets) != mt.NumOut() {
c.t.Fatalf("wrong number of arguments to Return for %T.%v: got %d, want %d [%s]",
c.receiver, c.method, len(rets), mt.NumOut(), c.origin)
}
for i, ret := range rets {
if got, want := reflect.TypeOf(ret), mt.Out(i); got == want {
// Identical types; nothing to do.
} else if got == nil {
// Nil needs special handling.
switch want.Kind() {
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
// ok
default:
c.t.Fatalf("argument %d to Return for %T.%v is nil, but %v is not nillable [%s]",
i, c.receiver, c.method, want, c.origin)
}
} else if got.AssignableTo(want) {
// Assignable type relation. Make the assignment now so that the generated code
// can return the values with a type assertion.
v := reflect.New(want).Elem()
v.Set(reflect.ValueOf(ret))
rets[i] = v.Interface()
} else {
c.t.Fatalf("wrong type of argument %d to Return for %T.%v: %v is not assignable to %v [%s]",
i, c.receiver, c.method, got, want, c.origin)
}
}

c.addAction(func([]interface{}) []interface{} {
return rets
})

return c
}

// Times declares the exact number of times a function call is expected to be executed.
func (c *Call) Times(n int) *Call {
c.minCalls, c.maxCalls = n, n
return c
}

// SetArg declares an action that will set the nth argument's value,
// indirected through a pointer. Or, in the case of a slice, SetArg
// will copy value's elements into the nth argument.
func (c *Call) SetArg(n int, value interface{}) *Call {
c.t.Helper()

mt := c.methodType
// TODO: This will break on variadic methods.
// We will need to check those at invocation time.
if n < 0 || n >= mt.NumIn() {
c.t.Fatalf("SetArg(%d, ...) called for a method with %d args [%s]",
n, mt.NumIn(), c.origin)
}
// Permit setting argument through an interface.
// In the interface case, we don't (nay, can't) check the type here.
at := mt.In(n)
switch at.Kind() {
case reflect.Ptr:
dt := at.Elem()
if vt := reflect.TypeOf(value); !vt.AssignableTo(dt) {
c.t.Fatalf("SetArg(%d, ...) argument is a %v, not assignable to %v [%s]",
n, vt, dt, c.origin)
}
case reflect.Interface:
// nothing to do
case reflect.Slice:
// nothing to do
default:
c.t.Fatalf("SetArg(%d, ...) referring to argument of non-pointer non-interface non-slice type %v [%s]",
n, at, c.origin)
}

c.addAction(func(args []interface{}) []interface{} {
v := reflect.ValueOf(value)
switch reflect.TypeOf(args[n]).Kind() {
case reflect.Slice:
setSlice(args[n], v)
default:
reflect.ValueOf(args[n]).Elem().Set(v)
}
return nil
})
return c
}

// isPreReq returns true if other is a direct or indirect prerequisite to c.
func (c *Call) isPreReq(other *Call) bool {
for _, preReq := range c.preReqs {
if other == preReq || preReq.isPreReq(other) {
return true
}
}
return false
}

// After declares that the call may only match after preReq has been exhausted.
func (c *Call) After(preReq *Call) *Call {
c.t.Helper()

if c == preReq {
c.t.Fatalf("A call isn't allowed to be its own prerequisite")
}
if preReq.isPreReq(c) {
c.t.Fatalf("Loop in call order: %v is a prerequisite to %v (possibly indirectly).", c, preReq)
}

c.preReqs = append(c.preReqs, preReq)
return c
}

// Returns true if the minimum number of calls have been made.
func (c *Call) satisfied() bool {
return c.numCalls >= c.minCalls
}

// Returns true iff the maximum number of calls have been made.
func (c *Call) exhausted() bool {
return c.numCalls >= c.maxCalls
}

func (c *Call) String() string {
args := make([]string, len(c.args))
for i, arg := range c.args {
args[i] = arg.String()
}
arguments := strings.Join(args, ", ")
return fmt.Sprintf("%T.%v(%s) %s", c.receiver, c.method, arguments, c.origin)
}

// Tests if the given call matches the expected call.
// If yes, returns nil. If no, returns error with message explaining why it does not match.
func (c *Call) matches(args []interface{}) error {
if !c.methodType.IsVariadic() {
if len(args) != len(c.args) {
return fmt.Errorf("Expected call at %s has the wrong number of arguments. Got: %d, want: %d",
c.origin, len(args), len(c.args))
}

for i, m := range c.args {
if !m.Matches(args[i]) {
return fmt.Errorf("Expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v",
c.origin, strconv.Itoa(i), args[i], m)
}
}
} else {
if len(c.args) < c.methodType.NumIn()-1 {
return fmt.Errorf("Expected call at %s has the wrong number of matchers. Got: %d, want: %d",
c.origin, len(c.args), c.methodType.NumIn()-1)
}
if len(c.args) != c.methodType.NumIn() && len(args) != len(c.args) {
return fmt.Errorf("Expected call at %s has the wrong number of arguments. Got: %d, want: %d",
c.origin, len(args), len(c.args))
}
if len(args) < len(c.args)-1 {
return fmt.Errorf("Expected call at %s has the wrong number of arguments. Got: %d, want: greater than or equal to %d",
c.origin, len(args), len(c.args)-1)
}

for i, m := range c.args {
if i < c.methodType.NumIn()-1 {
// Non-variadic args
if !m.Matches(args[i]) {
return fmt.Errorf("Expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v",
c.origin, strconv.Itoa(i), args[i], m)
}
continue
}
// The last arg has a possibility of a variadic argument, so let it branch

// sample: Foo(a int, b int, c ...int)
if i < len(c.args) && i < len(args) {
if m.Matches(args[i]) {
// Got Foo(a, b, c) want Foo(matcherA, matcherB, gomock.Any())
// Got Foo(a, b, c) want Foo(matcherA, matcherB, someSliceMatcher)
// Got Foo(a, b, c) want Foo(matcherA, matcherB, matcherC)
// Got Foo(a, b) want Foo(matcherA, matcherB)
// Got Foo(a, b, c, d) want Foo(matcherA, matcherB, matcherC, matcherD)
continue
}
}

// The number of actual args don't match the number of matchers,
// or the last matcher is a slice and the last arg is not.
// If this function still matches it is because the last matcher
// matches all the remaining arguments or the lack of any.
// Convert the remaining arguments, if any, into a slice of the
// expected type.
vargsType := c.methodType.In(c.methodType.NumIn() - 1)
vargs := reflect.MakeSlice(vargsType, 0, len(args)-i)
for _, arg := range args[i:] {
vargs = reflect.Append(vargs, reflect.ValueOf(arg))
}
if m.Matches(vargs.Interface()) {
// Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, gomock.Any())
// Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, someSliceMatcher)
// Got Foo(a, b) want Foo(matcherA, matcherB, gomock.Any())
// Got Foo(a, b) want Foo(matcherA, matcherB, someEmptySliceMatcher)
break
}
// Wrong number of matchers or not match. Fail.
// Got Foo(a, b) want Foo(matcherA, matcherB, matcherC, matcherD)
// Got Foo(a, b, c) want Foo(matcherA, matcherB, matcherC, matcherD)
// Got Foo(a, b, c, d) want Foo(matcherA, matcherB, matcherC, matcherD, matcherE)
// Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, matcherC, matcherD)
// Got Foo(a, b, c) want Foo(matcherA, matcherB)
return fmt.Errorf("Expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v",
c.origin, strconv.Itoa(i), args[i:], c.args[i])

}
}

// Check that all prerequisite calls have been satisfied.
for _, preReqCall := range c.preReqs {
if !preReqCall.satisfied() {
return fmt.Errorf("Expected call at %s doesn't have a prerequisite call satisfied:\n%v\nshould be called before:\n%v",
c.origin, preReqCall, c)
}
}

// Check that the call is not exhausted.
if c.exhausted() {
return fmt.Errorf("Expected call at %s has already been called the max number of times.", c.origin)
}

return nil
}

// dropPrereqs tells the expected Call to not re-check prerequisite calls any
// longer, and to return its current set.
func (c *Call) dropPrereqs() (preReqs []*Call) {
preReqs = c.preReqs
c.preReqs = nil
return
}

func (c *Call) call(args []interface{}) []func([]interface{}) []interface{} {
c.numCalls++
return c.actions
}

// InOrder declares that the given calls should occur in order.
func InOrder(calls ...*Call) {
for i := 1; i < len(calls); i++ {
calls[i].After(calls[i-1])
}
}

func setSlice(arg interface{}, v reflect.Value) {
va := reflect.ValueOf(arg)
for i := 0; i < v.Len(); i++ {
va.Index(i).Set(v.Index(i))
}
}

func (c *Call) addAction(action func([]interface{}) []interface{}) {
c.actions = append(c.actions, action)
}

+ 0
- 108
vendor/github.com/golang/mock/gomock/callset.go View File

@@ -1,108 +0,0 @@
// Copyright 2011 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package gomock

import (
"bytes"
"fmt"
)

// callSet represents a set of expected calls, indexed by receiver and method
// name.
type callSet struct {
// Calls that are still expected.
expected map[callSetKey][]*Call
// Calls that have been exhausted.
exhausted map[callSetKey][]*Call
}

// callSetKey is the key in the maps in callSet
type callSetKey struct {
receiver interface{}
fname string
}

func newCallSet() *callSet {
return &callSet{make(map[callSetKey][]*Call), make(map[callSetKey][]*Call)}
}

// Add adds a new expected call.
func (cs callSet) Add(call *Call) {
key := callSetKey{call.receiver, call.method}
m := cs.expected
if call.exhausted() {
m = cs.exhausted
}
m[key] = append(m[key], call)
}

// Remove removes an expected call.
func (cs callSet) Remove(call *Call) {
key := callSetKey{call.receiver, call.method}
calls := cs.expected[key]
for i, c := range calls {
if c == call {
// maintain order for remaining calls
cs.expected[key] = append(calls[:i], calls[i+1:]...)
cs.exhausted[key] = append(cs.exhausted[key], call)
break
}
}
}

// FindMatch searches for a matching call. Returns error with explanation message if no call matched.
func (cs callSet) FindMatch(receiver interface{}, method string, args []interface{}) (*Call, error) {
key := callSetKey{receiver, method}

// Search through the expected calls.
expected := cs.expected[key]
var callsErrors bytes.Buffer
for _, call := range expected {
err := call.matches(args)
if err != nil {
fmt.Fprintf(&callsErrors, "\n%v", err)
} else {
return call, nil
}
}

// If we haven't found a match then search through the exhausted calls so we
// get useful error messages.
exhausted := cs.exhausted[key]
for _, call := range exhausted {
if err := call.matches(args); err != nil {
fmt.Fprintf(&callsErrors, "\n%v", err)
}
}

if len(expected)+len(exhausted) == 0 {
fmt.Fprintf(&callsErrors, "there are no expected calls of the method %q for that receiver", method)
}

return nil, fmt.Errorf(callsErrors.String())
}

// Failures returns the calls that are not satisfied.
func (cs callSet) Failures() []*Call {
failures := make([]*Call, 0, len(cs.expected))
for _, calls := range cs.expected {
for _, call := range calls {
if !call.satisfied() {
failures = append(failures, call)
}
}
}
return failures
}

+ 0
- 264
vendor/github.com/golang/mock/gomock/controller.go View File

@@ -1,264 +0,0 @@
// Copyright 2010 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package gomock is a mock framework for Go.
//
// Standard usage:
// (1) Define an interface that you wish to mock.
// type MyInterface interface {
// SomeMethod(x int64, y string)
// }
// (2) Use mockgen to generate a mock from the interface.
// (3) Use the mock in a test:
// func TestMyThing(t *testing.T) {
// mockCtrl := gomock.NewController(t)
// defer mockCtrl.Finish()
//
// mockObj := something.NewMockMyInterface(mockCtrl)
// mockObj.EXPECT().SomeMethod(4, "blah")
// // pass mockObj to a real object and play with it.
// }
//
// By default, expected calls are not enforced to run in any particular order.
// Call order dependency can be enforced by use of InOrder and/or Call.After.
// Call.After can create more varied call order dependencies, but InOrder is
// often more convenient.
//
// The following examples create equivalent call order dependencies.
//
// Example of using Call.After to chain expected call order:
//
// firstCall := mockObj.EXPECT().SomeMethod(1, "first")
// secondCall := mockObj.EXPECT().SomeMethod(2, "second").After(firstCall)
// mockObj.EXPECT().SomeMethod(3, "third").After(secondCall)
//
// Example of using InOrder to declare expected call order:
//
// gomock.InOrder(
// mockObj.EXPECT().SomeMethod(1, "first"),
// mockObj.EXPECT().SomeMethod(2, "second"),
// mockObj.EXPECT().SomeMethod(3, "third"),
// )
//
// TODO:
// - Handle different argument/return types (e.g. ..., chan, map, interface).
package gomock

import (
"context"
"fmt"
"reflect"
"runtime"
"sync"
)

// A TestReporter is something that can be used to report test failures. It
// is satisfied by the standard library's *testing.T.
type TestReporter interface {
Errorf(format string, args ...interface{})
Fatalf(format string, args ...interface{})
}

// TestHelper is a TestReporter that has the Helper method. It is satisfied
// by the standard library's *testing.T.
type TestHelper interface {
TestReporter
Helper()
}

// A Controller represents the top-level control of a mock ecosystem. It
// defines the scope and lifetime of mock objects, as well as their
// expectations. It is safe to call Controller's methods from multiple
// goroutines. Each test should create a new Controller and invoke Finish via
// defer.
//
// func TestFoo(t *testing.T) {
// ctrl := gomock.NewController(st)
// defer ctrl.Finish()
// // ..
// }
//
// func TestBar(t *testing.T) {
// t.Run("Sub-Test-1", st) {
// ctrl := gomock.NewController(st)
// defer ctrl.Finish()
// // ..
// })
// t.Run("Sub-Test-2", st) {
// ctrl := gomock.NewController(st)
// defer ctrl.Finish()
// // ..
// })
// })
type Controller struct {
// T should only be called within a generated mock. It is not intended to
// be used in user code and may be changed in future versions. T is the
// TestReporter passed in when creating the Controller via NewController.
// If the TestReporter does not implement a TestHelper it will be wrapped
// with a nopTestHelper.
T TestHelper
mu sync.Mutex
expectedCalls *callSet
finished bool
}

// NewController returns a new Controller. It is the preferred way to create a
// Controller.
func NewController(t TestReporter) *Controller {
h, ok := t.(TestHelper)
if !ok {
h = nopTestHelper{t}
}

return &Controller{
T: h,
expectedCalls: newCallSet(),
}
}

type cancelReporter struct {
TestHelper
cancel func()
}

func (r *cancelReporter) Errorf(format string, args ...interface{}) {
r.TestHelper.Errorf(format, args...)
}
func (r *cancelReporter) Fatalf(format string, args ...interface{}) {
defer r.cancel()
r.TestHelper.Fatalf(format, args...)
}

// WithContext returns a new Controller and a Context, which is cancelled on any
// fatal failure.
func WithContext(ctx context.Context, t TestReporter) (*Controller, context.Context) {
h, ok := t.(TestHelper)
if !ok {
h = nopTestHelper{t}
}

ctx, cancel := context.WithCancel(ctx)
return NewController(&cancelReporter{h, cancel}), ctx
}

type nopTestHelper struct {
TestReporter
}

func (h nopTestHelper) Helper() {}

// RecordCall is called by a mock. It should not be called by user code.
func (ctrl *Controller) RecordCall(receiver interface{}, method string, args ...interface{}) *Call {
ctrl.T.Helper()

recv := reflect.ValueOf(receiver)
for i := 0; i < recv.Type().NumMethod(); i++ {
if recv.Type().Method(i).Name == method {
return ctrl.RecordCallWithMethodType(receiver, method, recv.Method(i).Type(), args...)
}
}
ctrl.T.Fatalf("gomock: failed finding method %s on %T", method, receiver)
panic("unreachable")
}

// RecordCallWithMethodType is called by a mock. It should not be called by user code.
func (ctrl *Controller) RecordCallWithMethodType(receiver interface{}, method string, methodType reflect.Type, args ...interface{}) *Call {
ctrl.T.Helper()

call := newCall(ctrl.T, receiver, method, methodType, args...)

ctrl.mu.Lock()
defer ctrl.mu.Unlock()
ctrl.expectedCalls.Add(call)

return call
}

// Call is called by a mock. It should not be called by user code.
func (ctrl *Controller) Call(receiver interface{}, method string, args ...interface{}) []interface{} {
ctrl.T.Helper()

// Nest this code so we can use defer to make sure the lock is released.
actions := func() []func([]interface{}) []interface{} {
ctrl.T.Helper()
ctrl.mu.Lock()
defer ctrl.mu.Unlock()

expected, err := ctrl.expectedCalls.FindMatch(receiver, method, args)
if err != nil {
origin := callerInfo(2)
ctrl.T.Fatalf("Unexpected call to %T.%v(%v) at %s because: %s", receiver, method, args, origin, err)
}

// Two things happen here:
// * the matching call no longer needs to check prerequite calls,
// * and the prerequite calls are no longer expected, so remove them.
preReqCalls := expected.dropPrereqs()
for _, preReqCall := range preReqCalls {
ctrl.expectedCalls.Remove(preReqCall)
}

actions := expected.call(args)
if expected.exhausted() {
ctrl.expectedCalls.Remove(expected)
}
return actions
}()

var rets []interface{}
for _, action := range actions {
if r := action(args); r != nil {
rets = r
}
}

return rets
}

// Finish checks to see if all the methods that were expected to be called
// were called. It should be invoked for each Controller. It is not idempotent
// and therefore can only be invoked once.
func (ctrl *Controller) Finish() {
ctrl.T.Helper()

ctrl.mu.Lock()
defer ctrl.mu.Unlock()

if ctrl.finished {
ctrl.T.Fatalf("Controller.Finish was called more than once. It has to be called exactly once.")
}
ctrl.finished = true

// If we're currently panicking, probably because this is a deferred call,
// pass through the panic.
if err := recover(); err != nil {
panic(err)
}

// Check that all remaining expected calls are satisfied.
failures := ctrl.expectedCalls.Failures()
for _, call := range failures {
ctrl.T.Errorf("missing call(s) to %v", call)
}
if len(failures) != 0 {
ctrl.T.Fatalf("aborting test due to missing call(s)")
}
}

func callerInfo(skip int) string {
if _, file, line, ok := runtime.Caller(skip + 1); ok {
return fmt.Sprintf("%s:%d", file, line)
}
return "unknown file"
}

+ 0
- 141
vendor/github.com/golang/mock/gomock/matchers.go View File

@@ -1,141 +0,0 @@
// Copyright 2010 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package gomock

import (
"fmt"
"reflect"
)

// A Matcher is a representation of a class of values.
// It is used to represent the valid or expected arguments to a mocked method.
type Matcher interface {
// Matches returns whether x is a match.
Matches(x interface{}) bool

// String describes what the matcher matches.
String() string
}

type anyMatcher struct{}

func (anyMatcher) Matches(x interface{}) bool {
return true
}

func (anyMatcher) String() string {
return "is anything"
}

type eqMatcher struct {
x interface{}
}

func (e eqMatcher) Matches(x interface{}) bool {
return reflect.DeepEqual(e.x, x)
}

func (e eqMatcher) String() string {
return fmt.Sprintf("is equal to %v", e.x)
}

type nilMatcher struct{}

func (nilMatcher) Matches(x interface{}) bool {
if x == nil {
return true
}

v := reflect.ValueOf(x)
switch v.Kind() {
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map,
reflect.Ptr, reflect.Slice:
return v.IsNil()
}

return false
}

func (nilMatcher) String() string {
return "is nil"
}

type notMatcher struct {
m Matcher
}

func (n notMatcher) Matches(x interface{}) bool {
return !n.m.Matches(x)
}

func (n notMatcher) String() string {
// TODO: Improve this if we add a NotString method to the Matcher interface.
return "not(" + n.m.String() + ")"
}

type assignableToTypeOfMatcher struct {
targetType reflect.Type
}

func (m assignableToTypeOfMatcher) Matches(x interface{}) bool {
return reflect.TypeOf(x).AssignableTo(m.targetType)
}

func (m assignableToTypeOfMatcher) String() string {
return "is assignable to " + m.targetType.Name()
}

// Constructors
// Any returns a matcher that always matches.
func Any() Matcher { return anyMatcher{} }

// Eq returns a matcher that matches on equality.
//
// Example usage:
// Eq(5).Matches(5) // returns true
// Eq(5).Matches(4) // returns false
func Eq(x interface{}) Matcher { return eqMatcher{x} }

// Nil returns a matcher that matches if the received value is nil.
//
// Example usage:
// var x *bytes.Buffer
// Nil().Matches(x) // returns true
// x = &bytes.Buffer{}
// Nil().Matches(x) // returns false
func Nil() Matcher { return nilMatcher{} }

// Not reverses the results of its given child matcher.
//
// Example usage:
// Not(Eq(5)).Matches(4) // returns true
// Not(Eq(5)).Matches(5) // returns false
func Not(x interface{}) Matcher {
if m, ok := x.(Matcher); ok {
return notMatcher{m}
}
return notMatcher{Eq(x)}
}

// AssignableToTypeOf is a Matcher that matches if the parameter to the mock
// function is assignable to the type of the parameter to this function.
//
// Example usage:
// var s fmt.Stringer = &bytes.Buffer{}
// AssignableToTypeOf(s).Matches(time.Second) // returns true
// AssignableToTypeOf(s).Matches(99) // returns false
func AssignableToTypeOf(x interface{}) Matcher {
return assignableToTypeOfMatcher{reflect.TypeOf(x)}
}

+ 201
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/LICENSE View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright 2019 Huawei Technologies Co., Ltd.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

+ 347
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/auth.go View File

@@ -0,0 +1,347 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"fmt"
"net/url"
"sort"
"strings"
"time"
)

// setURLWithPolicy strips the leading "/<bucket>/" (or bare "/<bucket>")
// prefix from a canonicalized URL; any other URL is returned unchanged.
func setURLWithPolicy(bucketName, canonicalizedUrl string) string {
	withSlash := "/" + bucketName + "/"
	bare := "/" + bucketName
	switch {
	case strings.HasPrefix(canonicalizedUrl, withSlash):
		return canonicalizedUrl[len(withSlash):]
	case strings.HasPrefix(canonicalizedUrl, bare):
		return canonicalizedUrl[len(bare):]
	}
	return canonicalizedUrl
}

// doAuthTemporary builds a pre-signed (temporary) request URL, valid for
// `expires` seconds, for the given method/bucket/object. Depending on the
// configured signature scheme it appends either V4 query-string credentials
// or V2-style AccessKeyId/Expires/Signature (or Policy) parameters.
// When no ak/sk is configured the URL is returned unsigned.
// Idiom fixes vs. the previous revision: `isAkSkEmpty == false` → `!isAkSkEmpty`,
// `strings.Index(...) < 0` → `!strings.Contains(...)`; behavior is unchanged.
func (obsClient ObsClient) doAuthTemporary(method, bucketName, objectKey string, policy string, params map[string]string,
	headers map[string][]string, expires int64) (requestURL string, err error) {
	sh := obsClient.getSecurity()
	isAkSkEmpty := sh.ak == "" || sh.sk == ""
	if !isAkSkEmpty && sh.securityToken != "" {
		if obsClient.conf.signature == SignatureObs {
			params[HEADER_STS_TOKEN_OBS] = sh.securityToken
		} else {
			params[HEADER_STS_TOKEN_AMZ] = sh.securityToken
		}
	}

	// A policy-based (browser upload) signature signs the policy document,
	// not an object key.
	if policy != "" {
		objectKey = ""
	}

	requestURL, canonicalizedURL := obsClient.conf.formatUrls(bucketName, objectKey, params, true)
	parsedRequestURL, err := url.Parse(requestURL)
	if err != nil {
		return "", err
	}
	encodeHeaders(headers)
	hostName := parsedRequestURL.Host

	isV4 := obsClient.conf.signature == SignatureV4
	prepareHostAndDate(headers, hostName, isV4)

	if isAkSkEmpty {
		doLog(LEVEL_WARN, "No ak/sk provided, skip to construct authorization")
	} else {
		if isV4 {
			// The Date header was normalized by prepareHostAndDate; it is the
			// signing instant for the V4 scheme.
			date, parseDateErr := time.Parse(RFC1123_FORMAT, headers[HEADER_DATE_CAMEL][0])
			if parseDateErr != nil {
				doLog(LEVEL_WARN, "Failed to parse date with reason: %v", parseDateErr)
				return "", parseDateErr
			}
			delete(headers, HEADER_DATE_CAMEL)
			shortDate := date.Format(SHORT_DATE_FORMAT)
			longDate := date.Format(LONG_DATE_FORMAT)
			// Default ports must not appear in the signed Host header.
			if len(headers[HEADER_HOST_CAMEL]) != 0 {
				index := strings.LastIndex(headers[HEADER_HOST_CAMEL][0], ":")
				if index != -1 {
					port := headers[HEADER_HOST_CAMEL][0][index+1:]
					if port == "80" || port == "443" {
						headers[HEADER_HOST_CAMEL] = []string{headers[HEADER_HOST_CAMEL][0][:index]}
					}
				}
			}

			signedHeaders, _headers := getSignedHeaders(headers)

			credential, scope := getCredential(sh.ak, obsClient.conf.region, shortDate)
			params[PARAM_ALGORITHM_AMZ_CAMEL] = V4_HASH_PREFIX
			params[PARAM_CREDENTIAL_AMZ_CAMEL] = credential
			params[PARAM_DATE_AMZ_CAMEL] = longDate
			params[PARAM_EXPIRES_AMZ_CAMEL] = Int64ToString(expires)
			params[PARAM_SIGNEDHEADERS_AMZ_CAMEL] = strings.Join(signedHeaders, ";")

			// Re-format the URL now that the auth query parameters are present.
			requestURL, canonicalizedURL = obsClient.conf.formatUrls(bucketName, objectKey, params, true)
			parsedRequestURL, _err := url.Parse(requestURL)
			if _err != nil {
				return "", _err
			}

			stringToSign := getV4StringToSign(method, canonicalizedURL, parsedRequestURL.RawQuery, scope, longDate, UNSIGNED_PAYLOAD, signedHeaders, _headers)
			signature := getSignature(stringToSign, sh.sk, obsClient.conf.region, shortDate)

			requestURL += fmt.Sprintf("&%s=%s", PARAM_SIGNATURE_AMZ_CAMEL, UrlEncode(signature, false))

		} else {
			originDate := headers[HEADER_DATE_CAMEL][0]
			date, parseDateErr := time.Parse(RFC1123_FORMAT, originDate)
			if parseDateErr != nil {
				doLog(LEVEL_WARN, "Failed to parse date with reason: %v", parseDateErr)
				return "", parseDateErr
			}
			// V2 pre-signing puts the absolute expiry epoch where the Date
			// header normally goes in the string to sign.
			expires += date.Unix()
			if policy == "" {
				headers[HEADER_DATE_CAMEL] = []string{Int64ToString(expires)}
			} else {
				policy = Base64Encode([]byte(policy))
				headers[HEADER_DATE_CAMEL] = []string{policy}
				canonicalizedURL = setURLWithPolicy(bucketName, canonicalizedURL)
			}

			stringToSign := getV2StringToSign(method, canonicalizedURL, headers, obsClient.conf.signature == SignatureObs)
			signature := UrlEncode(Base64Encode(HmacSha1([]byte(sh.sk), []byte(stringToSign))), false)
			if !strings.Contains(requestURL, "?") {
				requestURL += "?"
			} else {
				requestURL += "&"
			}
			delete(headers, HEADER_DATE_CAMEL)

			// Non-OBS signing uses the "AWSAccessKeyId" parameter name; the
			// "AWS" prefix is concatenated with "AccessKeyId" below.
			if obsClient.conf.signature != SignatureObs {
				requestURL += "AWS"
			}
			if policy == "" {
				requestURL += fmt.Sprintf("AccessKeyId=%s&Expires=%d&Signature=%s", UrlEncode(sh.ak, false),
					expires, signature)
				return
			}
			requestURL += fmt.Sprintf("AccessKeyId=%s&Policy=%s&Signature=%s", UrlEncode(sh.ak, false),
				UrlEncode(policy, false), signature)
		}
	}

	return
}

// doAuth computes the Authorization header for a normal (non-pre-signed)
// request and stores it in headers. With an empty ak/sk the request is left
// unsigned and a warning is logged. hostName overrides the host parsed from
// the formatted request URL when non-empty.
// Idiom fix vs. the previous revision: `isAkSkEmpty == false` → `!isAkSkEmpty`;
// behavior is unchanged.
func (obsClient ObsClient) doAuth(method, bucketName, objectKey string, params map[string]string,
	headers map[string][]string, hostName string) (requestURL string, err error) {
	sh := obsClient.getSecurity()
	isAkSkEmpty := sh.ak == "" || sh.sk == ""
	if !isAkSkEmpty && sh.securityToken != "" {
		if obsClient.conf.signature == SignatureObs {
			headers[HEADER_STS_TOKEN_OBS] = []string{sh.securityToken}
		} else {
			headers[HEADER_STS_TOKEN_AMZ] = []string{sh.securityToken}
		}
	}
	isObs := obsClient.conf.signature == SignatureObs
	requestURL, canonicalizedURL := obsClient.conf.formatUrls(bucketName, objectKey, params, true)
	parsedRequestURL, err := url.Parse(requestURL)
	if err != nil {
		return "", err
	}
	encodeHeaders(headers)

	if hostName == "" {
		hostName = parsedRequestURL.Host
	}

	isV4 := obsClient.conf.signature == SignatureV4
	prepareHostAndDate(headers, hostName, isV4)

	if isAkSkEmpty {
		doLog(LEVEL_WARN, "No ak/sk provided, skip to construct authorization")
	} else {
		ak := sh.ak
		sk := sh.sk
		var authorization string
		if isV4 {
			// V4 always signs with an unsigned-payload placeholder here.
			headers[HEADER_CONTENT_SHA256_AMZ] = []string{UNSIGNED_PAYLOAD}
			ret := v4Auth(ak, sk, obsClient.conf.region, method, canonicalizedURL, parsedRequestURL.RawQuery, headers)
			authorization = fmt.Sprintf("%s Credential=%s,SignedHeaders=%s,Signature=%s", V4_HASH_PREFIX, ret["Credential"], ret["SignedHeaders"], ret["Signature"])
		} else {
			ret := v2Auth(ak, sk, method, canonicalizedURL, headers, isObs)
			hashPrefix := V2_HASH_PREFIX
			if isObs {
				hashPrefix = OBS_HASH_PREFIX
			}
			authorization = fmt.Sprintf("%s %s:%s", hashPrefix, ak, ret["Signature"])
		}
		headers[HEADER_AUTH_CAMEL] = []string{authorization}
	}
	return
}

// prepareHostAndDate sets the Host header and normalizes the date header:
// a usable x-amz-date is converted into the canonical Date header (RFC1123
// for V4, passed through for V2 when it already ends in "GMT"); an unusable
// one is dropped. When no Date header results, the current UTC time is used.
func prepareHostAndDate(headers map[string][]string, hostName string, isV4 bool) {
	headers[HEADER_HOST_CAMEL] = []string{hostName}
	if date, ok := headers[HEADER_DATE_AMZ]; ok {
		converted := false
		if len(date) == 1 {
			switch {
			case isV4:
				if t, err := time.Parse(LONG_DATE_FORMAT, date[0]); err == nil {
					headers[HEADER_DATE_CAMEL] = []string{FormatUtcToRfc1123(t)}
					converted = true
				}
			case strings.HasSuffix(date[0], "GMT"):
				headers[HEADER_DATE_CAMEL] = []string{date[0]}
				converted = true
			}
		}
		if !converted {
			delete(headers, HEADER_DATE_AMZ)
		}
	}
	if _, ok := headers[HEADER_DATE_CAMEL]; !ok {
		headers[HEADER_DATE_CAMEL] = []string{FormatUtcToRfc1123(time.Now().UTC())}
	}
}

// encodeHeaders URL-encodes every header value in place. The values slice
// obtained from the map shares its backing array with the map entry, so the
// previous `headers[key] = values` re-assignment was a no-op and is removed.
func encodeHeaders(headers map[string][]string) {
	for _, values := range headers {
		for index, value := range values {
			values[index] = UrlEncode(value, true)
		}
	}
}

// prepareDateHeader blanks the canonical Date entry in _headers when a
// service-specific date (dataHeader in _headers, or dateCamelHeader in
// headers) takes precedence. The original code had two outer branches with
// byte-identical bodies (Date present in camel or lower case); they are
// merged here — behavior is unchanged.
func prepareDateHeader(dataHeader, dateCamelHeader string, headers, _headers map[string][]string) {
	_, hasCamel := _headers[HEADER_DATE_CAMEL]
	_, hasLower := _headers[strings.ToLower(HEADER_DATE_CAMEL)]
	if !hasCamel && !hasLower {
		return
	}
	if _, ok := _headers[dataHeader]; ok {
		_headers[HEADER_DATE_CAMEL] = []string{""}
		return
	}
	if _, ok := headers[dateCamelHeader]; ok {
		_headers[HEADER_DATE_CAMEL] = []string{""}
	}
}

// getStringToSign renders one canonical line per header key: prefixed
// ("x-amz-"/"x-obs-") headers become "key:value", everything else is the
// bare joined value; multi-valued metadata headers are trimmed per value and
// comma-joined. The prefix constants are loop-invariant and are now computed
// once instead of on every iteration; the hand-rolled comma join for
// metadata headers is replaced by strings.Join. Behavior is unchanged.
func getStringToSign(keys []string, isObs bool, _headers map[string][]string) []string {
	prefixHeader := HEADER_PREFIX
	prefixMetaHeader := HEADER_PREFIX_META
	if isObs {
		prefixHeader = HEADER_PREFIX_OBS
		prefixMetaHeader = HEADER_PREFIX_META_OBS
	}
	stringToSign := make([]string, 0, len(keys))
	for _, key := range keys {
		var value string
		if strings.HasPrefix(key, prefixHeader) {
			if strings.HasPrefix(key, prefixMetaHeader) {
				// Metadata values are whitespace-trimmed before joining.
				trimmed := make([]string, 0, len(_headers[key]))
				for _, v := range _headers[key] {
					trimmed = append(trimmed, strings.TrimSpace(v))
				}
				value = strings.Join(trimmed, ",")
			} else {
				value = strings.Join(_headers[key], ",")
			}
			value = fmt.Sprintf("%s:%s", key, value)
		} else {
			value = strings.Join(_headers[key], ",")
		}
		stringToSign = append(stringToSign, value)
	}
	return stringToSign
}

// attachHeaders selects the headers relevant to V2 signing (content-md5,
// content-type, date, and prefixed headers), lowercases their names, fills
// in blanks for the always-signed "interested" headers, applies the date
// precedence rule, and returns the sorted canonical lines joined by newline.
// Whitespace-only header names are removed from the source map.
func attachHeaders(headers map[string][]string, isObs bool) string {
	prefixHeader := HEADER_PREFIX
	if isObs {
		prefixHeader = HEADER_PREFIX_OBS
	}

	_headers := make(map[string][]string, len(headers))
	keys := make([]string, 0, len(headers))
	for key, value := range headers {
		lowerKey := strings.ToLower(strings.TrimSpace(key))
		if lowerKey == "" {
			delete(headers, key)
			continue
		}
		switch {
		case lowerKey == "content-md5", lowerKey == "content-type", lowerKey == "date",
			strings.HasPrefix(lowerKey, prefixHeader):
			keys = append(keys, lowerKey)
			_headers[lowerKey] = value
		}
	}

	// Headers that always participate in signing get an empty placeholder.
	for _, interestedHeader := range interestedHeaders {
		if _, ok := _headers[interestedHeader]; !ok {
			_headers[interestedHeader] = []string{""}
			keys = append(keys, interestedHeader)
		}
	}

	dateCamelHeader := PARAM_DATE_AMZ_CAMEL
	dataHeader := HEADER_DATE_AMZ
	if isObs {
		dateCamelHeader = PARAM_DATE_OBS_CAMEL
		dataHeader = HEADER_DATE_OBS
	}
	prepareDateHeader(dataHeader, dateCamelHeader, headers, _headers)

	sort.Strings(keys)
	return strings.Join(getStringToSign(keys, isObs, _headers), "\n")
}

// getScope builds the V4 credential scope string:
// "<shortDate>/<region>/<service>/<suffix>".
func getScope(region, shortDate string) string {
	return fmt.Sprintf("%s/%s/%s/%s", shortDate, region, V4_SERVICE_NAME, V4_SERVICE_SUFFIX)
}

// getCredential returns the V4 credential ("<ak>/<scope>") together with the
// scope itself, so callers need not recompute it.
func getCredential(ak, region, shortDate string) (string, string) {
	scope := getScope(region, shortDate)
	return fmt.Sprintf("%s/%s", ak, scope), scope
}

// getSignedHeaders lowercases and trims every header name, returning the
// sorted list of signed header names and a map keyed by those lowercase
// names. Whitespace-only header names are dropped from the source map.
func getSignedHeaders(headers map[string][]string) ([]string, map[string][]string) {
	lowered := make(map[string][]string, len(headers))
	names := make([]string, 0, len(headers))
	for key, value := range headers {
		name := strings.ToLower(strings.TrimSpace(key))
		if name == "" {
			delete(headers, key)
			continue
		}
		names = append(names, name)
		lowered[name] = value
	}
	sort.Strings(names)
	return names, lowered
}

func getSignature(stringToSign, sk, region, shortDate string) string {
key := HmacSha256([]byte(V4_HASH_PRE+sk), []byte(shortDate))
key = HmacSha256(key, []byte(region))
key = HmacSha256(key, []byte(V4_SERVICE_NAME))
key = HmacSha256(key, []byte(V4_SERVICE_SUFFIX))
return Hex(HmacSha256(key, []byte(stringToSign)))
}

+ 55
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/authV2.go View File

@@ -0,0 +1,55 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"strings"
)

// getV2StringToSign builds the V2 string to sign from the method, the
// canonicalized headers, and the canonicalized URL. Any security token —
// found in the headers or in the URL query string — is masked with "******"
// before the string is written to the debug log; the returned value is
// unmasked.
func getV2StringToSign(method, canonicalizedURL string, headers map[string][]string, isObs bool) string {
	stringToSign := method + "\n" + attachHeaders(headers, isObs) + "\n" + canonicalizedURL

	tokenHeader := HEADER_STS_TOKEN_AMZ
	if isObs {
		tokenHeader = HEADER_STS_TOKEN_OBS
	}
	securityToken, isSecurityToken := headers[tokenHeader]

	if !isSecurityToken {
		if parts := strings.Split(canonicalizedURL, "?"); len(parts) > 1 {
			for _, kv := range strings.Split(parts[1], "&") {
				if !strings.HasPrefix(kv, HEADER_STS_TOKEN_AMZ+"=") && !strings.HasPrefix(kv, HEADER_STS_TOKEN_OBS+"=") {
					continue
				}
				// NOTE(review): the slice offset reuses the AMZ name's length
				// for the OBS-prefixed parameter too — this assumes both token
				// parameter names are equally long; confirm against constants.
				if token := kv[len(HEADER_STS_TOKEN_AMZ)+1:]; token != "" {
					securityToken = []string{token}
					isSecurityToken = true
				}
			}
		}
	}

	logStringToSign := stringToSign
	if isSecurityToken && len(securityToken) > 0 {
		logStringToSign = strings.Replace(logStringToSign, securityToken[0], "******", -1)
	}
	doLog(LEVEL_DEBUG, "The v2 auth stringToSign:\n%s", logStringToSign)
	return stringToSign
}

// v2Auth computes the V2 signature: the string to sign is HMAC-SHA1'd with
// sk and base64-encoded. ak is accepted for signature parity with v4Auth but
// is not used here.
func v2Auth(ak, sk, method, canonicalizedURL string, headers map[string][]string, isObs bool) map[string]string {
	stringToSign := getV2StringToSign(method, canonicalizedURL, headers, isObs)
	return map[string]string{"Signature": Base64Encode(HmacSha1([]byte(sk), []byte(stringToSign)))}
}

+ 136
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/authV4.go View File

@@ -0,0 +1,136 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"strings"
"time"
)

// getV4StringToSign assembles the V4 canonical request (method, URL, query,
// signed headers, payload hash placeholder) and returns the string to sign:
// algorithm, long date, scope, and the SHA-256 of the canonical request.
// Any security token — from the headers or the query string — is masked
// before the canonical request is logged.
// Cleanups vs. the previous revision: the unnecessary comma-ok map read
// (`values, _ := headers[...]`) and the unused intermediate `query` slice
// are removed, and repeated single-element appends are collapsed into
// variadic appends. Behavior is unchanged.
func getV4StringToSign(method, canonicalizedURL, queryURL, scope, longDate, payload string, signedHeaders []string, headers map[string][]string) string {
	canonicalRequest := make([]string, 0, 10+len(signedHeaders)*4)
	canonicalRequest = append(canonicalRequest, method, "\n", canonicalizedURL, "\n", queryURL, "\n")

	for _, signedHeader := range signedHeaders {
		for _, value := range headers[signedHeader] {
			canonicalRequest = append(canonicalRequest, signedHeader, ":", value, "\n")
		}
	}
	canonicalRequest = append(canonicalRequest, "\n", strings.Join(signedHeaders, ";"), "\n", payload)

	_canonicalRequest := strings.Join(canonicalRequest, "")

	// Find the security token (headers first, query string second) so it can
	// be masked in the debug log.
	var isSecurityToken bool
	var securityToken []string
	if securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_OBS]; !isSecurityToken {
		securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_AMZ]
	}
	if !isSecurityToken {
		for _, value := range strings.Split(queryURL, "&") {
			if strings.HasPrefix(value, HEADER_STS_TOKEN_AMZ+"=") || strings.HasPrefix(value, HEADER_STS_TOKEN_OBS+"=") {
				// NOTE(review): the slice offset reuses the AMZ name's length
				// for the OBS-prefixed parameter too — this assumes both token
				// parameter names are equally long; confirm against constants.
				if token := value[len(HEADER_STS_TOKEN_AMZ)+1:]; token != "" {
					securityToken = []string{token}
					isSecurityToken = true
				}
			}
		}
	}
	logCanonicalRequest := _canonicalRequest
	if isSecurityToken && len(securityToken) > 0 {
		logCanonicalRequest = strings.Replace(logCanonicalRequest, securityToken[0], "******", -1)
	}
	doLog(LEVEL_DEBUG, "The v4 auth canonicalRequest:\n%s", logCanonicalRequest)

	stringToSign := []string{V4_HASH_PREFIX, "\n", longDate, "\n", scope, "\n", HexSha256([]byte(_canonicalRequest))}
	return strings.Join(stringToSign, "")
}

// V4Auth is a wrapper for v4Auth. It exposes the V4 signature computation
// (returning "Credential", "SignedHeaders", and "Signature" entries) to
// callers outside this package.
func V4Auth(ak, sk, region, method, canonicalizedURL, queryURL string, headers map[string][]string) map[string]string {
	return v4Auth(ak, sk, region, method, canonicalizedURL, queryURL, headers)
}

// v4Auth computes the V4 signature pieces ("Credential", "SignedHeaders",
// "Signature") for a request. The signing time comes from the first date
// header found — x-amz-date (long format), X-Amz-Date (long format), Date
// (RFC1123), or lowercase date (RFC1123) — falling back to the current UTC
// time when none is present or the found value fails to parse.
func v4Auth(ak, sk, region, method, canonicalizedURL, queryURL string, headers map[string][]string) map[string]string {
	t := time.Now().UTC()
	candidates := []struct {
		header string
		layout string
	}{
		{HEADER_DATE_AMZ, LONG_DATE_FORMAT},
		{PARAM_DATE_AMZ_CAMEL, LONG_DATE_FORMAT},
		{HEADER_DATE_CAMEL, RFC1123_FORMAT},
		{strings.ToLower(HEADER_DATE_CAMEL), RFC1123_FORMAT},
	}
	for _, c := range candidates {
		if val, ok := headers[c.header]; ok {
			if parsed, err := time.Parse(c.layout, val[0]); err == nil {
				t = parsed
			}
			// First present header wins; a parse failure keeps the UTC now.
			break
		}
	}
	shortDate := t.Format(SHORT_DATE_FORMAT)
	longDate := t.Format(LONG_DATE_FORMAT)

	signedHeaders, _headers := getSignedHeaders(headers)
	credential, scope := getCredential(ak, region, shortDate)

	payload := UNSIGNED_PAYLOAD
	if val, ok := headers[HEADER_CONTENT_SHA256_AMZ]; ok {
		payload = val[0]
	}
	stringToSign := getV4StringToSign(method, canonicalizedURL, queryURL, scope, longDate, payload, signedHeaders, _headers)
	signature := getSignature(stringToSign, sk, region, shortDate)

	return map[string]string{
		"Credential":    credential,
		"SignedHeaders": strings.Join(signedHeaders, ";"),
		"Signature":     signature,
	}
}

+ 49
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/callback.go View File

@@ -0,0 +1,49 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"errors"
"io"
)

// ICallbackReadCloser is implemented by output types that can carry a
// server-callback response body.
type ICallbackReadCloser interface {
	setCallbackReadCloser(body io.ReadCloser)
}

// setCallbackReadCloser attaches the callback response body to the output.
func (output *PutObjectOutput) setCallbackReadCloser(body io.ReadCloser) {
	output.CallbackBody.data = body
}

// setCallbackReadCloser attaches the callback response body to the output.
func (output *CompleteMultipartUploadOutput) setCallbackReadCloser(body io.ReadCloser) {
	output.CallbackBody.data = body
}

// CallbackBody holds the server-callback response stream and exposes
// explicit read/close helpers over it.
type CallbackBody struct {
	data io.ReadCloser
}

// ReadCallbackBody reads callback data into p via the underlying reader; it
// returns an error when no callback data is attached.
func (output CallbackBody) ReadCallbackBody(p []byte) (int, error) {
	if output.data != nil {
		return output.data.Read(p)
	}
	return 0, errors.New("have no callback data")
}

// CloseCallbackBody closes the underlying callback stream; it returns an
// error when no callback data is attached.
func (output CallbackBody) CloseCallbackBody() error {
	if output.data != nil {
		return output.data.Close()
	}
	return errors.New("have no callback data")
}

+ 68
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_base.go View File

@@ -0,0 +1,68 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"fmt"
"net/http"
"strings"
)

// ObsClient defines OBS client.
// It pairs the resolved client configuration with the HTTP client used to
// execute requests.
type ObsClient struct {
	conf       *config
	httpClient *http.Client
}

// New creates a new ObsClient instance.
// It seeds the configuration with a basic ak/sk security provider, applies
// the functional configurers, initializes defaults and the HTTP transport,
// and returns a client. A custom http.Client supplied via configuration is
// used as-is; otherwise one is built from the prepared transport.
// The previously duplicated doLog+construct-return tail is collapsed into a
// single path; behavior is unchanged.
func New(ak, sk, endpoint string, configurers ...configurer) (*ObsClient, error) {
	conf := &config{endpoint: endpoint}
	conf.securityProviders = make([]securityProvider, 0, 3)
	conf.securityProviders = append(conf.securityProviders, NewBasicSecurityProvider(ak, sk, ""))

	// Negative values mean "not set"; initConfigWithDefault fills in defaults.
	conf.maxRetryCount = -1
	conf.maxRedirectCount = -1
	for _, configurer := range configurers {
		configurer(conf)
	}

	if err := conf.initConfigWithDefault(); err != nil {
		return nil, err
	}
	if err := conf.getTransport(); err != nil {
		return nil, err
	}

	if isWarnLogEnabled() {
		info := make([]string, 3)
		info[0] = fmt.Sprintf("[OBS SDK Version=%s", OBS_SDK_VERSION)
		info[1] = fmt.Sprintf("Endpoint=%s", conf.endpoint)
		accessMode := "Virtual Hosting"
		if conf.pathStyle {
			accessMode = "Path"
		}
		info[2] = fmt.Sprintf("Access Mode=%s]", accessMode)
		doLog(LEVEL_WARN, strings.Join(info, "];["))
	}

	doLog(LEVEL_DEBUG, "Create obsclient with config:\n%s\n", conf)
	httpClient := conf.httpClient
	if httpClient == nil {
		httpClient = &http.Client{Transport: conf.transport, CheckRedirect: checkRedirectFunc}
	}
	return &ObsClient{conf: conf, httpClient: httpClient}, nil
}

+ 869
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_bucket.go View File

@@ -0,0 +1,869 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"errors"
"fmt"
"strings"
)

// DeleteBucketCustomDomain removes a custom domain bound to the bucket.
func (obsClient ObsClient) DeleteBucketCustomDomain(input *DeleteBucketCustomDomainInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("DeleteBucketCustomDomainInput is nil")
	}
	result := &BaseModel{}
	if err = obsClient.doActionWithBucket("DeleteBucketCustomDomain", HTTP_DELETE, input.Bucket, newSubResourceSerialV2(SubResourceCustomDomain, input.CustomDomain), result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// SetBucketCustomDomain binds a custom domain to the bucket.
func (obsClient ObsClient) SetBucketCustomDomain(input *SetBucketCustomDomainInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("SetBucketCustomDomainInput is nil")
	}
	result := &BaseModel{}
	if err = obsClient.doActionWithBucket("SetBucketCustomDomain", HTTP_PUT, input.Bucket, input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// GetBucketCustomDomain returns the custom domains bound to the bucket.
func (obsClient ObsClient) GetBucketCustomDomain(bucketName string, extensions ...extensionOptions) (output *GetBucketCustomDomainOutput, err error) {
	result := &GetBucketCustomDomainOutput{}
	if err = obsClient.doActionWithBucket("GetBucketCustomDomain", HTTP_GET, bucketName, newSubResourceSerial(SubResourceCustomDomain), result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// SetBucketMirrorBackToSource sets the mirror-back-to-source rules of a bucket.
//
// A nil input is rejected explicitly; previously it caused a nil-pointer
// panic when input.Bucket was dereferenced, unlike every other Set* API
// in this file which guards against nil.
func (obsClient ObsClient) SetBucketMirrorBackToSource(input *SetBucketMirrorBackToSourceInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("SetBucketMirrorBackToSourceInput is nil")
	}
	output = &BaseModel{}
	err = obsClient.doActionWithBucket("SetBucketMirrorBackToSource", HTTP_PUT, input.Bucket, input, output, extensions)
	if err != nil {
		output = nil
	}
	return
}

// DeleteBucketMirrorBackToSource deletes the mirror-back-to-source rules of a bucket.
func (obsClient ObsClient) DeleteBucketMirrorBackToSource(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
	result := &BaseModel{}
	if err = obsClient.doActionWithBucketV2("DeleteBucketMirrorBackToSource", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceMirrorBackToSource), result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// GetBucketMirrorBackToSource returns the mirror-back-to-source rules of a bucket.
func (obsClient ObsClient) GetBucketMirrorBackToSource(bucketName string, extensions ...extensionOptions) (output *GetBucketMirrorBackToSourceOutput, err error) {
	result := &GetBucketMirrorBackToSourceOutput{}
	if err = obsClient.doActionWithBucketV2("GetBucketMirrorBackToSource", HTTP_GET, bucketName, newSubResourceSerial(SubResourceMirrorBackToSource), result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// ListBuckets lists buckets.
//
// You can use this API to obtain the bucket list. In the list, bucket names are displayed in lexicographical order.
// ListBuckets obtains the bucket list, in lexicographical order of names.
// A nil input is treated the same as an empty ListBucketsInput.
func (obsClient ObsClient) ListBuckets(input *ListBucketsInput, extensions ...extensionOptions) (output *ListBucketsOutput, err error) {
	if input == nil {
		input = &ListBucketsInput{}
	}
	result := &ListBucketsOutput{}
	if err = obsClient.doActionWithoutBucket("ListBuckets", HTTP_GET, input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// CreateBucket creates a bucket.
//
// You can use this API to create a bucket and name it as you specify. The created bucket name must be unique in OBS.
// CreateBucket creates a bucket with the specified name; the name must be
// unique in OBS.
func (obsClient ObsClient) CreateBucket(input *CreateBucketInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("CreateBucketInput is nil")
	}
	result := &BaseModel{}
	if err = obsClient.doActionWithBucket("CreateBucket", HTTP_PUT, input.Bucket, input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// DeleteBucket deletes a bucket.
//
// You can use this API to delete a bucket. The bucket to be deleted must be empty
// (containing no objects, noncurrent object versions, or part fragments).
// DeleteBucket deletes an empty bucket (no objects, versions, or fragments).
func (obsClient ObsClient) DeleteBucket(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
	result := &BaseModel{}
	if err = obsClient.doActionWithBucket("DeleteBucket", HTTP_DELETE, bucketName, defaultSerializable, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// SetBucketStoragePolicy sets bucket storage class.
//
// You can use this API to set storage class for bucket.
// SetBucketStoragePolicy sets the default storage class of a bucket.
func (obsClient ObsClient) SetBucketStoragePolicy(input *SetBucketStoragePolicyInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("SetBucketStoragePolicyInput is nil")
	}
	result := &BaseModel{}
	if err = obsClient.doActionWithBucket("SetBucketStoragePolicy", HTTP_PUT, input.Bucket, input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}
// getBucketStoragePolicyS3 fetches the bucket storage class using the
// S3-compatible sub-resource and adapts the response to the public output
// type.
func (obsClient ObsClient) getBucketStoragePolicyS3(bucketName string, extensions []extensionOptions) (output *GetBucketStoragePolicyOutput, err error) {
	output = &GetBucketStoragePolicyOutput{}
	// Short declaration instead of the redundant var-then-assign form.
	outputS3 := &getBucketStoragePolicyOutputS3{}
	err = obsClient.doActionWithBucket("GetBucketStoragePolicy", HTTP_GET, bucketName, newSubResourceSerial(SubResourceStoragePolicy), outputS3, extensions)
	if err != nil {
		output = nil
		return
	}
	output.BaseModel = outputS3.BaseModel
	output.StorageClass = fmt.Sprintf("%s", outputS3.StorageClass)
	return
}

// getBucketStoragePolicyObs fetches the bucket storage class using the
// OBS-native sub-resource and adapts the response to the public output type.
func (obsClient ObsClient) getBucketStoragePolicyObs(bucketName string, extensions []extensionOptions) (output *GetBucketStoragePolicyOutput, err error) {
	output = &GetBucketStoragePolicyOutput{}
	// Short declaration instead of the redundant var-then-assign form.
	outputObs := &getBucketStoragePolicyOutputObs{}
	err = obsClient.doActionWithBucket("GetBucketStoragePolicy", HTTP_GET, bucketName, newSubResourceSerial(SubResourceStorageClass), outputObs, extensions)
	if err != nil {
		output = nil
		return
	}
	output.BaseModel = outputObs.BaseModel
	output.StorageClass = outputObs.StorageClass
	return
}

// GetBucketStoragePolicy gets bucket storage class.
//
// You can use this API to obtain the storage class of a bucket.
// GetBucketStoragePolicy obtains the storage class of a bucket, dispatching
// on the configured signature type.
func (obsClient ObsClient) GetBucketStoragePolicy(bucketName string, extensions ...extensionOptions) (output *GetBucketStoragePolicyOutput, err error) {
	if obsClient.conf.signature != SignatureObs {
		return obsClient.getBucketStoragePolicyS3(bucketName, extensions)
	}
	return obsClient.getBucketStoragePolicyObs(bucketName, extensions)
}

// SetBucketQuota sets the bucket quota.
//
// You can use this API to set the bucket quota. A bucket quota must be expressed in bytes and the maximum value is 2^63-1.
// SetBucketQuota sets the bucket quota in bytes (maximum 2^63-1).
func (obsClient ObsClient) SetBucketQuota(input *SetBucketQuotaInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("SetBucketQuotaInput is nil")
	}
	result := &BaseModel{}
	if err = obsClient.doActionWithBucket("SetBucketQuota", HTTP_PUT, input.Bucket, input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// GetBucketQuota gets the bucket quota.
//
// You can use this API to obtain the bucket quota. Value 0 indicates that no upper limit is set for the bucket quota.
// GetBucketQuota obtains the bucket quota; 0 means no upper limit.
func (obsClient ObsClient) GetBucketQuota(bucketName string, extensions ...extensionOptions) (output *GetBucketQuotaOutput, err error) {
	result := &GetBucketQuotaOutput{}
	if err = obsClient.doActionWithBucket("GetBucketQuota", HTTP_GET, bucketName, newSubResourceSerial(SubResourceQuota), result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// HeadBucket checks whether a bucket exists.
//
// You can use this API to check whether a bucket exists.
// HeadBucket checks whether a bucket exists via a HEAD request.
func (obsClient ObsClient) HeadBucket(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
	result := &BaseModel{}
	if err = obsClient.doActionWithBucket("HeadBucket", HTTP_HEAD, bucketName, defaultSerializable, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// GetBucketMetadata gets the metadata of a bucket.
//
// You can use this API to send a HEAD request to a bucket to obtain the bucket
// metadata such as the storage class and CORS rules (if set).
// GetBucketMetadata sends a HEAD request to a bucket and returns parsed
// bucket metadata (storage class, CORS settings, etc.).
//
// A nil input is rejected explicitly; previously it caused a nil-pointer
// panic when input.Bucket was dereferenced.
func (obsClient ObsClient) GetBucketMetadata(input *GetBucketMetadataInput, extensions ...extensionOptions) (output *GetBucketMetadataOutput, err error) {
	if input == nil {
		return nil, errors.New("GetBucketMetadataInput is nil")
	}
	output = &GetBucketMetadataOutput{}
	err = obsClient.doActionWithBucket("GetBucketMetadata", HTTP_HEAD, input.Bucket, input, output, extensions)
	if err != nil {
		output = nil
	} else {
		// Response headers are translated into output fields here.
		ParseGetBucketMetadataOutput(output)
	}
	return
}

// GetBucketFSStatus sends a HEAD request to a bucket and returns its
// file-system status information.
//
// A nil input is rejected explicitly; previously it caused a nil-pointer
// panic when input.Bucket was dereferenced.
func (obsClient ObsClient) GetBucketFSStatus(input *GetBucketFSStatusInput, extensions ...extensionOptions) (output *GetBucketFSStatusOutput, err error) {
	if input == nil {
		return nil, errors.New("GetBucketFSStatusInput is nil")
	}
	output = &GetBucketFSStatusOutput{}
	err = obsClient.doActionWithBucket("GetBucketFSStatus", HTTP_HEAD, input.Bucket, input, output, extensions)
	if err != nil {
		output = nil
	} else {
		// Response headers are translated into output fields here.
		ParseGetBucketFSStatusOutput(output)
	}
	return
}

// GetBucketStorageInfo gets storage information about a bucket.
//
// You can use this API to obtain storage information about a bucket, including the
// bucket size and number of objects in the bucket.
// GetBucketStorageInfo obtains the bucket size and object count.
func (obsClient ObsClient) GetBucketStorageInfo(bucketName string, extensions ...extensionOptions) (output *GetBucketStorageInfoOutput, err error) {
	result := &GetBucketStorageInfoOutput{}
	if err = obsClient.doActionWithBucket("GetBucketStorageInfo", HTTP_GET, bucketName, newSubResourceSerial(SubResourceStorageInfo), result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// getBucketLocationS3 fetches the bucket location using the S3-compatible
// response shape and adapts it to the public output type.
func (obsClient ObsClient) getBucketLocationS3(bucketName string, extensions []extensionOptions) (output *GetBucketLocationOutput, err error) {
	output = &GetBucketLocationOutput{}
	// Short declaration instead of the redundant var-then-assign form.
	outputS3 := &getBucketLocationOutputS3{}
	err = obsClient.doActionWithBucket("GetBucketLocation", HTTP_GET, bucketName, newSubResourceSerial(SubResourceLocation), outputS3, extensions)
	if err != nil {
		output = nil
	} else {
		output.BaseModel = outputS3.BaseModel
		output.Location = outputS3.Location
	}
	return
}
// getBucketLocationObs fetches the bucket location using the OBS-native
// response shape and adapts it to the public output type.
func (obsClient ObsClient) getBucketLocationObs(bucketName string, extensions []extensionOptions) (output *GetBucketLocationOutput, err error) {
	output = &GetBucketLocationOutput{}
	// Short declaration instead of the redundant var-then-assign form.
	outputObs := &getBucketLocationOutputObs{}
	err = obsClient.doActionWithBucket("GetBucketLocation", HTTP_GET, bucketName, newSubResourceSerial(SubResourceLocation), outputObs, extensions)
	if err != nil {
		output = nil
	} else {
		output.BaseModel = outputObs.BaseModel
		output.Location = outputObs.Location
	}
	return
}

// GetBucketLocation gets the location of a bucket.
//
// You can use this API to obtain the bucket location.
// GetBucketLocation obtains the bucket location, dispatching on the
// configured signature type.
func (obsClient ObsClient) GetBucketLocation(bucketName string, extensions ...extensionOptions) (output *GetBucketLocationOutput, err error) {
	if obsClient.conf.signature != SignatureObs {
		return obsClient.getBucketLocationS3(bucketName, extensions)
	}
	return obsClient.getBucketLocationObs(bucketName, extensions)
}

// SetBucketAcl sets the bucket ACL.
//
// You can use this API to set the ACL for a bucket.
// SetBucketAcl sets the ACL for a bucket.
func (obsClient ObsClient) SetBucketAcl(input *SetBucketAclInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("SetBucketAclInput is nil")
	}
	result := &BaseModel{}
	if err = obsClient.doActionWithBucket("SetBucketAcl", HTTP_PUT, input.Bucket, input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}
// getBucketACLObs fetches the bucket ACL using the OBS-native response shape
// and converts each grant to the public Grant type, mapping the OBS
// "Everyone" canned grantee to the AllUsers group URI.
func (obsClient ObsClient) getBucketACLObs(bucketName string, extensions []extensionOptions) (output *GetBucketAclOutput, err error) {
	output = &GetBucketAclOutput{}
	// Short declaration instead of the redundant var-then-assign form.
	outputObs := &getBucketACLOutputObs{}
	err = obsClient.doActionWithBucket("GetBucketAcl", HTTP_GET, bucketName, newSubResourceSerial(SubResourceAcl), outputObs, extensions)
	if err != nil {
		output = nil
	} else {
		output.BaseModel = outputObs.BaseModel
		output.Owner = outputObs.Owner
		output.Grants = make([]Grant, 0, len(outputObs.Grants))
		for _, valGrant := range outputObs.Grants {
			tempOutput := Grant{}
			tempOutput.Delivered = valGrant.Delivered
			tempOutput.Permission = valGrant.Permission
			tempOutput.Grantee.DisplayName = valGrant.Grantee.DisplayName
			tempOutput.Grantee.ID = valGrant.Grantee.ID
			tempOutput.Grantee.Type = valGrant.Grantee.Type
			// OBS expresses the public group as the canned value "Everyone";
			// the S3-style output uses a group URI instead.
			if valGrant.Grantee.Canned == "Everyone" {
				tempOutput.Grantee.URI = GroupAllUsers
			}
			output.Grants = append(output.Grants, tempOutput)
		}
	}
	return
}

// GetBucketAcl gets the bucket ACL.
//
// You can use this API to obtain a bucket ACL.
// GetBucketAcl obtains a bucket ACL, dispatching on the configured
// signature type.
func (obsClient ObsClient) GetBucketAcl(bucketName string, extensions ...extensionOptions) (output *GetBucketAclOutput, err error) {
	if obsClient.conf.signature == SignatureObs {
		return obsClient.getBucketACLObs(bucketName, extensions)
	}
	result := &GetBucketAclOutput{}
	if err = obsClient.doActionWithBucket("GetBucketAcl", HTTP_GET, bucketName, newSubResourceSerial(SubResourceAcl), result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// SetBucketPolicy sets the bucket policy.
//
// You can use this API to set a bucket policy. If the bucket already has a policy, the
// policy will be overwritten by the one specified in this request.
// SetBucketPolicy sets a bucket policy; an existing policy is overwritten.
//
// The nil-input error message is normalized to "...Input is nil" for
// consistency with every other Set* API in this file (it previously read
// "SetBucketPolicy is nil").
func (obsClient ObsClient) SetBucketPolicy(input *SetBucketPolicyInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("SetBucketPolicyInput is nil")
	}
	output = &BaseModel{}
	err = obsClient.doActionWithBucket("SetBucketPolicy", HTTP_PUT, input.Bucket, input, output, extensions)
	if err != nil {
		output = nil
	}
	return
}

// GetBucketPolicy gets the bucket policy.
//
// You can use this API to obtain the policy of a bucket.
// GetBucketPolicy obtains the policy of a bucket.
func (obsClient ObsClient) GetBucketPolicy(bucketName string, extensions ...extensionOptions) (output *GetBucketPolicyOutput, err error) {
	result := &GetBucketPolicyOutput{}
	if err = obsClient.doActionWithBucketV2("GetBucketPolicy", HTTP_GET, bucketName, newSubResourceSerial(SubResourcePolicy), result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// DeleteBucketPolicy deletes the bucket policy.
//
// You can use this API to delete the policy of a bucket.
// DeleteBucketPolicy deletes the policy of a bucket.
func (obsClient ObsClient) DeleteBucketPolicy(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
	result := &BaseModel{}
	if err = obsClient.doActionWithBucket("DeleteBucketPolicy", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourcePolicy), result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// SetBucketCors sets CORS rules for a bucket.
//
// You can use this API to set CORS rules for a bucket to allow client browsers to send cross-origin requests.
// SetBucketCors sets CORS rules on a bucket.
func (obsClient ObsClient) SetBucketCors(input *SetBucketCorsInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("SetBucketCorsInput is nil")
	}
	result := &BaseModel{}
	if err = obsClient.doActionWithBucket("SetBucketCors", HTTP_PUT, input.Bucket, input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// GetBucketCors gets CORS rules of a bucket.
//
// You can use this API to obtain the CORS rules of a specified bucket.
// GetBucketCors obtains the CORS rules of a bucket.
func (obsClient ObsClient) GetBucketCors(bucketName string, extensions ...extensionOptions) (output *GetBucketCorsOutput, err error) {
	result := &GetBucketCorsOutput{}
	if err = obsClient.doActionWithBucket("GetBucketCors", HTTP_GET, bucketName, newSubResourceSerial(SubResourceCors), result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// DeleteBucketCors deletes CORS rules of a bucket.
//
// You can use this API to delete the CORS rules of a specified bucket.
// DeleteBucketCors deletes the CORS rules of a bucket.
func (obsClient ObsClient) DeleteBucketCors(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
	result := &BaseModel{}
	if err = obsClient.doActionWithBucket("DeleteBucketCors", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceCors), result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// SetBucketVersioning sets the versioning status for a bucket.
//
// You can use this API to set the versioning status for a bucket.
// SetBucketVersioning sets the versioning status of a bucket.
func (obsClient ObsClient) SetBucketVersioning(input *SetBucketVersioningInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("SetBucketVersioningInput is nil")
	}
	result := &BaseModel{}
	if err = obsClient.doActionWithBucket("SetBucketVersioning", HTTP_PUT, input.Bucket, input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// GetBucketVersioning gets the versioning status of a bucket.
//
// You can use this API to obtain the versioning status of a bucket.
// GetBucketVersioning obtains the versioning status of a bucket.
func (obsClient ObsClient) GetBucketVersioning(bucketName string, extensions ...extensionOptions) (output *GetBucketVersioningOutput, err error) {
	result := &GetBucketVersioningOutput{}
	if err = obsClient.doActionWithBucket("GetBucketVersioning", HTTP_GET, bucketName, newSubResourceSerial(SubResourceVersioning), result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// SetBucketWebsiteConfiguration sets website hosting for a bucket.
//
// You can use this API to set website hosting for a bucket.
// SetBucketWebsiteConfiguration sets website hosting for a bucket.
func (obsClient ObsClient) SetBucketWebsiteConfiguration(input *SetBucketWebsiteConfigurationInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("SetBucketWebsiteConfigurationInput is nil")
	}
	result := &BaseModel{}
	if err = obsClient.doActionWithBucket("SetBucketWebsiteConfiguration", HTTP_PUT, input.Bucket, input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// GetBucketWebsiteConfiguration gets the website hosting settings of a bucket.
//
// You can use this API to obtain the website hosting settings of a bucket.
// GetBucketWebsiteConfiguration obtains the website hosting settings of a bucket.
func (obsClient ObsClient) GetBucketWebsiteConfiguration(bucketName string, extensions ...extensionOptions) (output *GetBucketWebsiteConfigurationOutput, err error) {
	result := &GetBucketWebsiteConfigurationOutput{}
	if err = obsClient.doActionWithBucket("GetBucketWebsiteConfiguration", HTTP_GET, bucketName, newSubResourceSerial(SubResourceWebsite), result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// DeleteBucketWebsiteConfiguration deletes the website hosting settings of a bucket.
//
// You can use this API to delete the website hosting settings of a bucket.
// DeleteBucketWebsiteConfiguration deletes the website hosting settings of a bucket.
func (obsClient ObsClient) DeleteBucketWebsiteConfiguration(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
	result := &BaseModel{}
	if err = obsClient.doActionWithBucket("DeleteBucketWebsiteConfiguration", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceWebsite), result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// SetBucketLoggingConfiguration sets the bucket logging.
//
// You can use this API to configure access logging for a bucket.
// SetBucketLoggingConfiguration configures access logging for a bucket.
func (obsClient ObsClient) SetBucketLoggingConfiguration(input *SetBucketLoggingConfigurationInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("SetBucketLoggingConfigurationInput is nil")
	}
	result := &BaseModel{}
	if err = obsClient.doActionWithBucket("SetBucketLoggingConfiguration", HTTP_PUT, input.Bucket, input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// GetBucketLoggingConfiguration gets the logging settings of a bucket.
//
// You can use this API to obtain the access logging settings of a bucket.
// GetBucketLoggingConfiguration obtains the access logging settings of a bucket.
func (obsClient ObsClient) GetBucketLoggingConfiguration(bucketName string, extensions ...extensionOptions) (output *GetBucketLoggingConfigurationOutput, err error) {
	result := &GetBucketLoggingConfigurationOutput{}
	if err = obsClient.doActionWithBucket("GetBucketLoggingConfiguration", HTTP_GET, bucketName, newSubResourceSerial(SubResourceLogging), result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// SetBucketLifecycleConfiguration sets lifecycle rules for a bucket.
//
// You can use this API to set lifecycle rules for a bucket, to periodically transit
// storage classes of objects and delete objects in the bucket.
// SetBucketLifecycleConfiguration sets lifecycle rules for a bucket.
func (obsClient ObsClient) SetBucketLifecycleConfiguration(input *SetBucketLifecycleConfigurationInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("SetBucketLifecycleConfigurationInput is nil")
	}
	result := &BaseModel{}
	if err = obsClient.doActionWithBucket("SetBucketLifecycleConfiguration", HTTP_PUT, input.Bucket, input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// GetBucketLifecycleConfiguration gets lifecycle rules of a bucket.
//
// You can use this API to obtain the lifecycle rules of a bucket.
// GetBucketLifecycleConfiguration obtains the lifecycle rules of a bucket.
func (obsClient ObsClient) GetBucketLifecycleConfiguration(bucketName string, extensions ...extensionOptions) (output *GetBucketLifecycleConfigurationOutput, err error) {
	result := &GetBucketLifecycleConfigurationOutput{}
	if err = obsClient.doActionWithBucket("GetBucketLifecycleConfiguration", HTTP_GET, bucketName, newSubResourceSerial(SubResourceLifecycle), result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// DeleteBucketLifecycleConfiguration deletes lifecycle rules of a bucket.
//
// You can use this API to delete all lifecycle rules of a bucket.
// DeleteBucketLifecycleConfiguration deletes all lifecycle rules of a bucket.
func (obsClient ObsClient) DeleteBucketLifecycleConfiguration(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
	result := &BaseModel{}
	if err = obsClient.doActionWithBucket("DeleteBucketLifecycleConfiguration", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceLifecycle), result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// SetBucketEncryption sets the default server-side encryption for a bucket.
//
// You can use this API to create or update the default server-side encryption for a bucket.
// SetBucketEncryption creates or updates the default server-side encryption
// of a bucket.
func (obsClient ObsClient) SetBucketEncryption(input *SetBucketEncryptionInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("SetBucketEncryptionInput is nil")
	}
	result := &BaseModel{}
	if err = obsClient.doActionWithBucket("SetBucketEncryption", HTTP_PUT, input.Bucket, input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// GetBucketEncryption gets the encryption configuration of a bucket.
//
// You can use this API to obtain the encryption configuration of a bucket.
func (obsClient ObsClient) GetBucketEncryption(bucketName string, extensions ...extensionOptions) (output *GetBucketEncryptionOutput, err error) {
	result := &GetBucketEncryptionOutput{}
	if err = obsClient.doActionWithBucket("GetBucketEncryption", HTTP_GET, bucketName, newSubResourceSerial(SubResourceEncryption), result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// DeleteBucketEncryption deletes the encryption configuration of a bucket.
//
// You can use this API to delete the encryption configuration of a bucket.
// DeleteBucketEncryption deletes the encryption configuration of a bucket.
func (obsClient ObsClient) DeleteBucketEncryption(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
	result := &BaseModel{}
	if err = obsClient.doActionWithBucket("DeleteBucketEncryption", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceEncryption), result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// SetBucketTagging sets bucket tags.
//
// You can use this API to set bucket tags.
// SetBucketTagging sets bucket tags.
func (obsClient ObsClient) SetBucketTagging(input *SetBucketTaggingInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("SetBucketTaggingInput is nil")
	}
	result := &BaseModel{}
	if err = obsClient.doActionWithBucket("SetBucketTagging", HTTP_PUT, input.Bucket, input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// GetBucketTagging gets bucket tags.
//
// You can use this API to obtain the tags of a specified bucket.
// GetBucketTagging obtains the tags of a bucket.
func (obsClient ObsClient) GetBucketTagging(bucketName string, extensions ...extensionOptions) (output *GetBucketTaggingOutput, err error) {
	result := &GetBucketTaggingOutput{}
	if err = obsClient.doActionWithBucket("GetBucketTagging", HTTP_GET, bucketName, newSubResourceSerial(SubResourceTagging), result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// DeleteBucketTagging deletes bucket tags.
//
// You can use this API to delete the tags of a specified bucket.
// DeleteBucketTagging deletes the tags of a bucket.
func (obsClient ObsClient) DeleteBucketTagging(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
	result := &BaseModel{}
	if err = obsClient.doActionWithBucket("DeleteBucketTagging", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceTagging), result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// SetBucketNotification sets event notification for a bucket.
//
// You can use this API to configure event notification for a bucket. You will be notified of all
// specified operations performed on the bucket.
// SetBucketNotification configures event notification for a bucket.
func (obsClient ObsClient) SetBucketNotification(input *SetBucketNotificationInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("SetBucketNotificationInput is nil")
	}
	result := &BaseModel{}
	if err = obsClient.doActionWithBucket("SetBucketNotification", HTTP_PUT, input.Bucket, input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// GetBucketNotification gets event notification settings of a bucket.
//
// You can use this API to obtain the event notification configuration of a bucket.
// GetBucketNotification obtains the event notification configuration of a
// bucket, dispatching on the configured signature type.
func (obsClient ObsClient) GetBucketNotification(bucketName string, extensions ...extensionOptions) (output *GetBucketNotificationOutput, err error) {
	if obsClient.conf.signature == SignatureObs {
		result := &GetBucketNotificationOutput{}
		if err = obsClient.doActionWithBucket("GetBucketNotification", HTTP_GET, bucketName, newSubResourceSerial(SubResourceNotification), result, extensions); err != nil {
			return nil, err
		}
		return result, nil
	}
	return obsClient.getBucketNotificationS3(bucketName, extensions)
}

// getBucketNotificationS3 fetches the notification configuration using the
// S3-compatible response shape and converts it to the public output type,
// parsing each raw event string into an EventType.
func (obsClient ObsClient) getBucketNotificationS3(bucketName string, extensions []extensionOptions) (output *GetBucketNotificationOutput, err error) {
	rawOutput := &getBucketNotificationOutputS3{}
	err = obsClient.doActionWithBucket("GetBucketNotification", HTTP_GET, bucketName, newSubResourceSerial(SubResourceNotification), rawOutput, extensions)
	if err != nil {
		return nil, err
	}

	output = &GetBucketNotificationOutput{}
	output.BaseModel = rawOutput.BaseModel
	converted := make([]TopicConfiguration, 0, len(rawOutput.TopicConfigurations))
	for _, src := range rawOutput.TopicConfigurations {
		events := make([]EventType, 0, len(src.Events))
		for _, rawEvent := range src.Events {
			events = append(events, ParseStringToEventType(rawEvent))
		}
		converted = append(converted, TopicConfiguration{
			ID:          src.ID,
			Topic:       src.Topic,
			FilterRules: src.FilterRules,
			Events:      events,
		})
	}
	output.TopicConfigurations = converted
	return output, nil
}

// SetBucketRequestPayment sets requester-pays setting for a bucket.
// SetBucketRequestPayment sets the requester-pays setting of a bucket.
func (obsClient ObsClient) SetBucketRequestPayment(input *SetBucketRequestPaymentInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("SetBucketRequestPaymentInput is nil")
	}
	result := &BaseModel{}
	if err = obsClient.doActionWithBucket("SetBucketRequestPayment", HTTP_PUT, input.Bucket, input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// GetBucketRequestPayment gets requester-pays setting of a bucket.
// GetBucketRequestPayment obtains the requester-pays setting of a bucket.
func (obsClient ObsClient) GetBucketRequestPayment(bucketName string, extensions ...extensionOptions) (output *GetBucketRequestPaymentOutput, err error) {
	result := &GetBucketRequestPaymentOutput{}
	if err = obsClient.doActionWithBucket("GetBucketRequestPayment", HTTP_GET, bucketName, newSubResourceSerial(SubResourceRequestPayment), result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// SetBucketFetchPolicy sets the bucket fetch policy.
//
// You can use this API to set a bucket fetch policy.
// SetBucketFetchPolicy sets a bucket fetch policy; status and agency are
// required fields.
func (obsClient ObsClient) SetBucketFetchPolicy(input *SetBucketFetchPolicyInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	switch {
	case input == nil:
		return nil, errors.New("SetBucketFetchPolicyInput is nil")
	case strings.TrimSpace(string(input.Status)) == "":
		return nil, errors.New("Fetch policy status is empty")
	case strings.TrimSpace(input.Agency) == "":
		return nil, errors.New("Fetch policy agency is empty")
	}
	result := &BaseModel{}
	if err = obsClient.doActionWithBucketAndKey("SetBucketFetchPolicy", HTTP_PUT, input.Bucket, string(objectKeyExtensionPolicy), input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// GetBucketFetchPolicy gets the bucket fetch policy.
//
// You can use this API to obtain the fetch policy of a bucket.
// GetBucketFetchPolicy obtains the fetch policy of a bucket.
func (obsClient ObsClient) GetBucketFetchPolicy(input *GetBucketFetchPolicyInput, extensions ...extensionOptions) (output *GetBucketFetchPolicyOutput, err error) {
	if input == nil {
		return nil, errors.New("GetBucketFetchPolicyInput is nil")
	}
	result := &GetBucketFetchPolicyOutput{}
	if err = obsClient.doActionWithBucketAndKeyV2("GetBucketFetchPolicy", HTTP_GET, input.Bucket, string(objectKeyExtensionPolicy), input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// DeleteBucketFetchPolicy deletes the bucket fetch policy.
//
// You can use this API to delete the fetch policy of a bucket.
// DeleteBucketFetchPolicy deletes the fetch policy of a bucket.
func (obsClient ObsClient) DeleteBucketFetchPolicy(input *DeleteBucketFetchPolicyInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("DeleteBucketFetchPolicyInput is nil")
	}
	result := &BaseModel{}
	if err = obsClient.doActionWithBucketAndKey("DeleteBucketFetchPolicy", HTTP_DELETE, input.Bucket, string(objectKeyExtensionPolicy), input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// SetBucketFetchJob sets the bucket fetch job.
//
// You can use this API to set a bucket fetch job.
// SetBucketFetchJob creates an asynchronous fetch job; a source URL is required.
func (obsClient ObsClient) SetBucketFetchJob(input *SetBucketFetchJobInput, extensions ...extensionOptions) (output *SetBucketFetchJobOutput, err error) {
	switch {
	case input == nil:
		return nil, errors.New("SetBucketFetchJobInput is nil")
	case strings.TrimSpace(input.URL) == "":
		return nil, errors.New("URL is empty")
	}
	result := &SetBucketFetchJobOutput{}
	if err = obsClient.doActionWithBucketAndKeyV2("SetBucketFetchJob", HTTP_POST, input.Bucket, string(objectKeyAsyncFetchJob), input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// GetBucketFetchJob gets the bucket fetch job.
//
// You can use this API to obtain the fetch job of a bucket.
// GetBucketFetchJob queries an asynchronous fetch job by its job ID.
func (obsClient ObsClient) GetBucketFetchJob(input *GetBucketFetchJobInput, extensions ...extensionOptions) (output *GetBucketFetchJobOutput, err error) {
	switch {
	case input == nil:
		return nil, errors.New("GetBucketFetchJobInput is nil")
	case strings.TrimSpace(input.JobID) == "":
		return nil, errors.New("JobID is empty")
	}
	result := &GetBucketFetchJobOutput{}
	jobKey := string(objectKeyAsyncFetchJob) + "/" + input.JobID
	if err = obsClient.doActionWithBucketAndKeyV2("GetBucketFetchJob", HTTP_GET, input.Bucket, jobKey, input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// PutBucketPublicAccessBlock sets the bucket Block Public Access.
//
// You can use this API to set a bucket Block Public Access.
// PutBucketPublicAccessBlock sets the Block Public Access configuration of a bucket.
func (obsClient ObsClient) PutBucketPublicAccessBlock(input *PutBucketPublicAccessBlockInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("PutBucketPublicAccessBlockInput is nil")
	}
	result := &BaseModel{}
	if err = obsClient.doActionWithBucket("PutBucketPublicAccessBlock", HTTP_PUT, input.Bucket, input, result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// GetBucketPublicAccessBlock gets the bucket Block Public Access.
//
// You can use this API to get a bucket Block Public Access.
// GetBucketPublicAccessBlock obtains the Block Public Access configuration of a bucket.
func (obsClient ObsClient) GetBucketPublicAccessBlock(bucketName string, extensions ...extensionOptions) (output *GetBucketPublicAccessBlockOutput, err error) {
	result := &GetBucketPublicAccessBlockOutput{}
	if err = obsClient.doActionWithBucket("GetBucketPublicAccessBlock", HTTP_GET, bucketName, newSubResourceSerial(SubResourcePublicAccessBlock), result, extensions); err != nil {
		return nil, err
	}
	return result, nil
}

// DeleteBucketPublicAccessBlock deletes the bucket Block Public Access.
//
// You can use this API to delete the Block Public Access of a bucket.
func (obsClient ObsClient) DeleteBucketPublicAccessBlock(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
	result := &BaseModel{}
	if actionErr := obsClient.doActionWithBucket("DeleteBucketPublicAccessBlock", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourcePublicAccessBlock), result, extensions); actionErr != nil {
		return nil, actionErr
	}
	return result, nil
}

// GetBucketPolicyPublicStatus gets the bucket policy public status.
//
// You can use this API to get a bucket policy public status.
func (obsClient ObsClient) GetBucketPolicyPublicStatus(bucketName string, extensions ...extensionOptions) (output *GetBucketPolicyPublicStatusOutput, err error) {
	result := &GetBucketPolicyPublicStatusOutput{}
	if actionErr := obsClient.doActionWithBucket("GetBucketPolicyPublicStatus", HTTP_GET, bucketName, newSubResourceSerial(SubResourceBucketPolicyPublicStatus), result, extensions); actionErr != nil {
		return nil, actionErr
	}
	return result, nil
}

// GetBucketPublicStatus gets the bucket public status.
//
// You can use this API to get a bucket public status.
func (obsClient ObsClient) GetBucketPublicStatus(bucketName string, extensions ...extensionOptions) (output *GetBucketPublicStatusOutput, err error) {
	result := &GetBucketPublicStatusOutput{}
	if actionErr := obsClient.doActionWithBucket("GetBucketPublicStatus", HTTP_GET, bucketName, newSubResourceSerial(SubResourceBucketPublicStatus), result, extensions); actionErr != nil {
		return nil, actionErr
	}
	return result, nil
}

+ 571
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_object.go View File

@@ -0,0 +1,571 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"errors"
"fmt"
"io"
"os"
"strings"
)

// ListObjects lists objects in a bucket.
//
// You can use this API to list objects in a bucket. By default, a maximum of 1000 objects are listed.
func (obsClient ObsClient) ListObjects(input *ListObjectsInput, extensions ...extensionOptions) (output *ListObjectsOutput, err error) {
	if input == nil {
		return nil, errors.New("ListObjectsInput is nil")
	}
	result := &ListObjectsOutput{}
	if actionErr := obsClient.doActionWithBucket("ListObjects", HTTP_GET, input.Bucket, input, result, extensions); actionErr != nil {
		return nil, actionErr
	}
	// Surface the bucket region reported in the response headers.
	if location, ok := result.ResponseHeaders[HEADER_BUCKET_REGION]; ok {
		result.Location = location[0]
	}
	// URL-encoded listings must be decoded before being handed to the caller.
	if result.EncodingType == "url" {
		if decodeErr := decodeListObjectsOutput(result); decodeErr != nil {
			doLog(LEVEL_ERROR, "Failed to get ListObjectsOutput with error: %v.", decodeErr)
			return nil, decodeErr
		}
	}
	return result, nil
}

// ListPosixObjects lists objects in a posix.
//
// You can use this API to list objects in a posix. By default, a maximum of 1000 objects are listed.
func (obsClient ObsClient) ListPosixObjects(input *ListPosixObjectsInput, extensions ...extensionOptions) (output *ListPosixObjectsOutput, err error) {
	if input == nil {
		return nil, errors.New("ListPosixObjects is nil")
	}
	result := &ListPosixObjectsOutput{}
	if actionErr := obsClient.doActionWithBucket("ListPosixObjects", HTTP_GET, input.Bucket, input, result, extensions); actionErr != nil {
		return nil, actionErr
	}
	// Surface the bucket region reported in the response headers.
	if location, ok := result.ResponseHeaders[HEADER_BUCKET_REGION]; ok {
		result.Location = location[0]
	}
	// URL-encoded listings must be decoded before being handed to the caller.
	if result.EncodingType == "url" {
		if decodeErr := decodeListPosixObjectsOutput(result); decodeErr != nil {
			doLog(LEVEL_ERROR, "Failed to get ListPosixObjectsOutput with error: %v.", decodeErr)
			return nil, decodeErr
		}
	}
	return result, nil
}

// ListVersions lists versioning objects in a bucket.
//
// You can use this API to list versioning objects in a bucket. By default, a maximum of 1000 versioning objects are listed.
func (obsClient ObsClient) ListVersions(input *ListVersionsInput, extensions ...extensionOptions) (output *ListVersionsOutput, err error) {
	if input == nil {
		return nil, errors.New("ListVersionsInput is nil")
	}
	result := &ListVersionsOutput{}
	if actionErr := obsClient.doActionWithBucket("ListVersions", HTTP_GET, input.Bucket, input, result, extensions); actionErr != nil {
		return nil, actionErr
	}
	// Surface the bucket region reported in the response headers.
	if location, ok := result.ResponseHeaders[HEADER_BUCKET_REGION]; ok {
		result.Location = location[0]
	}
	// URL-encoded listings must be decoded before being handed to the caller.
	if result.EncodingType == "url" {
		if decodeErr := decodeListVersionsOutput(result); decodeErr != nil {
			doLog(LEVEL_ERROR, "Failed to get ListVersionsOutput with error: %v.", decodeErr)
			return nil, decodeErr
		}
	}
	return result, nil
}

// HeadObject checks whether an object exists.
//
// You can use this API to check whether an object exists.
func (obsClient ObsClient) HeadObject(input *HeadObjectInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("HeadObjectInput is nil")
	}
	result := &BaseModel{}
	if actionErr := obsClient.doActionWithBucketAndKey("HeadObject", HTTP_HEAD, input.Bucket, input.Key, input, result, extensions); actionErr != nil {
		return nil, actionErr
	}
	return result, nil
}

// SetObjectMetadata sets object metadata.
//
// Fix: validate input against nil like every sibling API in this file;
// previously a nil input caused a panic on the input.Bucket dereference.
func (obsClient ObsClient) SetObjectMetadata(input *SetObjectMetadataInput, extensions ...extensionOptions) (output *SetObjectMetadataOutput, err error) {
	if input == nil {
		return nil, errors.New("SetObjectMetadataInput is nil")
	}
	output = &SetObjectMetadataOutput{}
	err = obsClient.doActionWithBucketAndKey("SetObjectMetadata", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
	if err != nil {
		output = nil
	} else {
		ParseSetObjectMetadataOutput(output)
	}
	return
}

// DeleteObject deletes an object.
//
// You can use this API to delete an object from a specified bucket.
func (obsClient ObsClient) DeleteObject(input *DeleteObjectInput, extensions ...extensionOptions) (output *DeleteObjectOutput, err error) {
	if input == nil {
		return nil, errors.New("DeleteObjectInput is nil")
	}
	result := &DeleteObjectOutput{}
	if actionErr := obsClient.doActionWithBucketAndKey("DeleteObject", HTTP_DELETE, input.Bucket, input.Key, input, result, extensions); actionErr != nil {
		return nil, actionErr
	}
	ParseDeleteObjectOutput(result)
	return result, nil
}

// DeleteObjects deletes objects in a batch.
//
// You can use this API to batch delete objects from a specified bucket.
func (obsClient ObsClient) DeleteObjects(input *DeleteObjectsInput, extensions ...extensionOptions) (output *DeleteObjectsOutput, err error) {
	if input == nil {
		return nil, errors.New("DeleteObjectsInput is nil")
	}
	result := &DeleteObjectsOutput{}
	if actionErr := obsClient.doActionWithBucket("DeleteObjects", HTTP_POST, input.Bucket, input, result, extensions); actionErr != nil {
		return nil, actionErr
	}
	// URL-encoded results must be decoded before being handed to the caller.
	if result.EncodingType == "url" {
		if decodeErr := decodeDeleteObjectsOutput(result); decodeErr != nil {
			doLog(LEVEL_ERROR, "Failed to get DeleteObjectsOutput with error: %v.", decodeErr)
			return nil, decodeErr
		}
	}
	return result, nil
}

// SetObjectAcl sets ACL for an object.
//
// You can use this API to set the ACL for an object in a specified bucket.
func (obsClient ObsClient) SetObjectAcl(input *SetObjectAclInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("SetObjectAclInput is nil")
	}
	result := &BaseModel{}
	if actionErr := obsClient.doActionWithBucketAndKey("SetObjectAcl", HTTP_PUT, input.Bucket, input.Key, input, result, extensions); actionErr != nil {
		return nil, actionErr
	}
	return result, nil
}

// GetObjectAcl gets the ACL of an object.
//
// You can use this API to obtain the ACL of an object in a specified bucket.
func (obsClient ObsClient) GetObjectAcl(input *GetObjectAclInput, extensions ...extensionOptions) (output *GetObjectAclOutput, err error) {
	if input == nil {
		return nil, errors.New("GetObjectAclInput is nil")
	}
	result := &GetObjectAclOutput{}
	if actionErr := obsClient.doActionWithBucketAndKey("GetObjectAcl", HTTP_GET, input.Bucket, input.Key, input, result, extensions); actionErr != nil {
		return nil, actionErr
	}
	// Surface the version id reported in the response headers, if any.
	if versionID, ok := result.ResponseHeaders[HEADER_VERSION_ID]; ok {
		result.VersionId = versionID[0]
	}
	return result, nil
}

// RestoreObject restores an object.
func (obsClient ObsClient) RestoreObject(input *RestoreObjectInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("RestoreObjectInput is nil")
	}
	result := &BaseModel{}
	if actionErr := obsClient.doActionWithBucketAndKey("RestoreObject", HTTP_POST, input.Bucket, input.Key, input, result, extensions); actionErr != nil {
		return nil, actionErr
	}
	return result, nil
}

// GetObjectMetadata gets object metadata.
//
// You can use this API to send a HEAD request to the object of a specified bucket to obtain its metadata.
func (obsClient ObsClient) GetObjectMetadata(input *GetObjectMetadataInput, extensions ...extensionOptions) (output *GetObjectMetadataOutput, err error) {
	if input == nil {
		return nil, errors.New("GetObjectMetadataInput is nil")
	}
	result := &GetObjectMetadataOutput{}
	if actionErr := obsClient.doActionWithBucketAndKey("GetObjectMetadata", HTTP_HEAD, input.Bucket, input.Key, input, result, extensions); actionErr != nil {
		return nil, actionErr
	}
	ParseGetObjectMetadataOutput(result)
	return result, nil
}

// GetAttribute sends a HEAD request for an object or directory and parses
// the returned attributes.
func (obsClient ObsClient) GetAttribute(input *GetAttributeInput, extensions ...extensionOptions) (output *GetAttributeOutput, err error) {
	if input == nil {
		return nil, errors.New("GetAttributeInput is nil")
	}
	result := &GetAttributeOutput{}
	if actionErr := obsClient.doActionWithBucketAndKey("GetAttribute", HTTP_HEAD, input.Bucket, input.Key, input, result, extensions); actionErr != nil {
		return nil, actionErr
	}
	ParseGetAttributeOutput(result)
	return result, nil
}

// GetObject downloads object.
//
// You can use this API to download an object in a specified bucket.
func (obsClient ObsClient) GetObject(input *GetObjectInput, extensions ...extensionOptions) (output *GetObjectOutput, err error) {
	if input == nil {
		return nil, errors.New("GetObjectInput is nil")
	}
	if input.Range != "" && !strings.HasPrefix(input.Range, "bytes=") {
		return nil, errors.New("Range should start with [bytes=]")
	}
	result := &GetObjectOutput{}
	if actionErr := obsClient.doActionWithBucketAndKeyWithProgress(GET_OBJECT, HTTP_GET, input.Bucket, input.Key, input, result, extensions, nil); actionErr != nil {
		return nil, actionErr
	}

	ParseGetObjectOutput(result)
	// Wrap the body so download progress is reported to the listener, if any.
	if listener := obsClient.getProgressListener(extensions); listener != nil {
		result.Body = TeeReader(result.Body, result.ContentLength, listener, nil)
	}
	return result, nil
}

// GetObjectWithoutProgress downloads an object like GetObject but never
// attaches a progress listener to the response body.
func (obsClient ObsClient) GetObjectWithoutProgress(input *GetObjectInput, extensions ...extensionOptions) (output *GetObjectOutput, err error) {
	if input == nil {
		return nil, errors.New("GetObjectInput is nil")
	}
	result := &GetObjectOutput{}
	if actionErr := obsClient.doActionWithBucketAndKeyWithProgress(GET_OBJECT, HTTP_GET, input.Bucket, input.Key, input, result, extensions, nil); actionErr != nil {
		return nil, actionErr
	}

	ParseGetObjectOutput(result)
	return result, nil
}

// PutObject uploads an object to the specified bucket.
func (obsClient ObsClient) PutObject(input *PutObjectInput, extensions ...extensionOptions) (output *PutObjectOutput, err error) {
	if input == nil {
		return nil, errors.New("PutObjectInput is nil")
	}

	// Derive a Content-Type from the key suffix when the caller gave none.
	if input.ContentType == "" && input.Key != "" {
		if contentType, ok := GetContentType(input.Key); ok {
			input.ContentType = contentType
		}
	}
	result := &PutObjectOutput{}
	repeatable := false
	if input.Body != nil {
		// Only a strings.Reader can safely be re-sent on retry.
		_, repeatable = input.Body.(*strings.Reader)
		if input.ContentLength > 0 {
			input.Body = &readerWrapper{reader: input.Body, totalCount: input.ContentLength}
		}
	}

	listener := obsClient.getProgressListener(extensions)
	if repeatable {
		err = obsClient.doActionWithBucketAndKeyWithProgress(PUT_OBJECT, HTTP_PUT, input.Bucket, input.Key, input, result, extensions, listener)
	} else {
		err = obsClient.doActionWithBucketAndKeyUnRepeatableWithProgress(PUT_OBJECT, HTTP_PUT, input.Bucket, input.Key, input, result, extensions, listener)
	}
	if err != nil {
		return nil, err
	}
	ParsePutObjectOutput(result)
	result.ObjectUrl = fmt.Sprintf("%s/%s/%s", obsClient.conf.endpoint, input.Bucket, input.Key)
	return result, nil
}

// getContentType resolves a Content-Type from the object key first and from
// the source file name second; it returns "" when neither yields a match.
func (obsClient ObsClient) getContentType(input *PutObjectInput, sourceFile string) (contentType string) {
	if byKey, ok := GetContentType(input.Key); ok {
		return byKey
	}
	if byFile, ok := GetContentType(sourceFile); ok {
		return byFile
	}
	return ""
}

// isGetContentType reports whether a Content-Type should be derived for the
// request: the caller supplied none and an object key is present.
func (obsClient ObsClient) isGetContentType(input *PutObjectInput) bool {
	return input.ContentType == "" && input.Key != ""
}

// NewFolder creates a folder object in the specified bucket.
func (obsClient ObsClient) NewFolder(input *NewFolderInput, extensions ...extensionOptions) (output *NewFolderOutput, err error) {
	if input == nil {
		return nil, errors.New("NewFolderInput is nil")
	}

	// Folders are represented as objects whose key ends with "/".
	if !strings.HasSuffix(input.Key, "/") {
		input.Key += "/"
	}

	result := &NewFolderOutput{}
	if actionErr := obsClient.doActionWithBucketAndKey("NewFolder", HTTP_PUT, input.Bucket, input.Key, input, result, extensions); actionErr != nil {
		return nil, actionErr
	}
	ParseNewFolderOutput(result)
	result.ObjectUrl = fmt.Sprintf("%s/%s/%s", obsClient.conf.endpoint, input.Bucket, input.Key)
	return result, nil
}

// PutFile uploads a file to the specified bucket.
//
// When SourceFile is non-empty the file is opened and streamed as the request
// body; otherwise the request is sent with a nil body. The request is then
// delegated to the PutObject code path with the same basic input.
func (obsClient ObsClient) PutFile(input *PutFileInput, extensions ...extensionOptions) (output *PutObjectOutput, err error) {
	if input == nil {
		return nil, errors.New("PutFileInput is nil")
	}

	var body io.Reader
	sourceFile := strings.TrimSpace(input.SourceFile)
	if sourceFile != "" {
		fd, _err := os.Open(sourceFile)
		if _err != nil {
			err = _err
			return nil, err
		}
		// Closed when PutFile returns; the request body is fully consumed by then.
		defer func() {
			errMsg := fd.Close()
			if errMsg != nil {
				doLog(LEVEL_WARN, "Failed to close file with reason: %v", errMsg)
			}
		}()

		stat, _err := fd.Stat()
		if _err != nil {
			err = _err
			return nil, err
		}
		fileReaderWrapper := &fileReaderWrapper{filePath: sourceFile}
		fileReaderWrapper.reader = fd
		if input.ContentLength > 0 {
			// A declared length larger than the file is clamped to the file size.
			if input.ContentLength > stat.Size() {
				input.ContentLength = stat.Size()
			}
			fileReaderWrapper.totalCount = input.ContentLength
		} else {
			fileReaderWrapper.totalCount = stat.Size()
		}
		body = fileReaderWrapper
	}

	// Re-issue the request as a plain PutObject carrying the file reader.
	_input := &PutObjectInput{}
	_input.PutObjectBasicInput = input.PutObjectBasicInput
	_input.Body = body

	// Derive a Content-Type from the key or file name when none was supplied.
	if obsClient.isGetContentType(_input) {
		_input.ContentType = obsClient.getContentType(_input, sourceFile)
	}
	listener := obsClient.getProgressListener(extensions)
	output = &PutObjectOutput{}
	err = obsClient.doActionWithBucketAndKeyWithProgress(PUT_FILE, HTTP_PUT, _input.Bucket, _input.Key, _input, output, extensions, listener)

	if err != nil {
		output = nil
		return
	}

	ParsePutObjectOutput(output)
	output.ObjectUrl = fmt.Sprintf("%s/%s/%s", obsClient.conf.endpoint, input.Bucket, input.Key)
	return
}

// CopyObject creates a copy for an existing object.
//
// You can use this API to create a copy for an object in a specified bucket.
func (obsClient ObsClient) CopyObject(input *CopyObjectInput, extensions ...extensionOptions) (output *CopyObjectOutput, err error) {
	if input == nil {
		return nil, errors.New("CopyObjectInput is nil")
	}

	// The copy source must be fully specified.
	if strings.TrimSpace(input.CopySourceBucket) == "" {
		return nil, errors.New("Source bucket is empty")
	}
	if strings.TrimSpace(input.CopySourceKey) == "" {
		return nil, errors.New("Source key is empty")
	}

	result := &CopyObjectOutput{}
	if actionErr := obsClient.doActionWithBucketAndKey("CopyObject", HTTP_PUT, input.Bucket, input.Key, input, result, extensions); actionErr != nil {
		return nil, actionErr
	}
	ParseCopyObjectOutput(result)
	return result, nil
}

// AppendObject appends data to an appendable object.
//
// Fix: a failure from ParseAppendObjectOutput previously reset output to nil
// while leaving err nil, so callers received (nil, nil) and could not tell
// the call failed; the parse error is now propagated as err.
func (obsClient ObsClient) AppendObject(input *AppendObjectInput, extensions ...extensionOptions) (output *AppendObjectOutput, err error) {
	if input == nil {
		return nil, errors.New("AppendObjectInput is nil")
	}

	// Derive a Content-Type from the key suffix when the caller gave none.
	if input.ContentType == "" && input.Key != "" {
		if contentType, ok := mimeTypes[strings.ToLower(input.Key[strings.LastIndex(input.Key, ".")+1:])]; ok {
			input.ContentType = contentType
		}
	}
	output = &AppendObjectOutput{}
	var repeatable bool
	if input.Body != nil {
		// Only a strings.Reader can safely be re-sent on retry.
		if _, ok := input.Body.(*strings.Reader); ok {
			repeatable = true
		}
		if input.ContentLength > 0 {
			input.Body = &readerWrapper{reader: input.Body, totalCount: input.ContentLength}
		}
	}
	listener := obsClient.getProgressListener(extensions)

	if repeatable {
		err = obsClient.doActionWithBucketAndKeyWithProgress(APPEND_OBJECT, HTTP_POST, input.Bucket, input.Key, input, output, extensions, listener)
	} else {
		err = obsClient.doActionWithBucketAndKeyUnRepeatableWithProgress(APPEND_OBJECT, HTTP_POST, input.Bucket, input.Key, input, output, extensions, listener)
	}

	if err != nil {
		output = nil
		return
	}
	if parseErr := ParseAppendObjectOutput(output); parseErr != nil {
		err = parseErr
		output = nil
	}
	return
}

// ModifyObject overwrites a byte range of an existing object.
func (obsClient ObsClient) ModifyObject(input *ModifyObjectInput, extensions ...extensionOptions) (output *ModifyObjectOutput, err error) {
	if input == nil {
		return nil, errors.New("ModifyObjectInput is nil")
	}

	result := &ModifyObjectOutput{}
	repeatable := false
	if input.Body != nil {
		// Only a strings.Reader can safely be re-sent on retry.
		_, repeatable = input.Body.(*strings.Reader)
		if input.ContentLength > 0 {
			input.Body = &readerWrapper{reader: input.Body, totalCount: input.ContentLength}
		}
	}
	if repeatable {
		err = obsClient.doActionWithBucketAndKey("ModifyObject", HTTP_PUT, input.Bucket, input.Key, input, result, extensions)
	} else {
		err = obsClient.doActionWithBucketAndKeyUnRepeatable("ModifyObject", HTTP_PUT, input.Bucket, input.Key, input, result, extensions)
	}
	if err != nil {
		return nil, err
	}
	ParseModifyObjectOutput(result)
	return result, nil
}

// RenameFile renames a file inside a bucket.
func (obsClient ObsClient) RenameFile(input *RenameFileInput, extensions ...extensionOptions) (output *RenameFileOutput, err error) {
	if input == nil {
		return nil, errors.New("RenameFileInput is nil")
	}

	result := &RenameFileOutput{}
	if actionErr := obsClient.doActionWithBucketAndKey("RenameFile", HTTP_POST, input.Bucket, input.Key, input, result, extensions); actionErr != nil {
		return nil, actionErr
	}
	return result, nil
}

// RenameFolder renames a folder inside a bucket.
func (obsClient ObsClient) RenameFolder(input *RenameFolderInput, extensions ...extensionOptions) (output *RenameFolderOutput, err error) {
	if input == nil {
		return nil, errors.New("RenameFolderInput is nil")
	}

	// Folder keys always carry a trailing "/"; normalize both sides.
	if !strings.HasSuffix(input.Key, "/") {
		input.Key += "/"
	}
	if !strings.HasSuffix(input.NewObjectKey, "/") {
		input.NewObjectKey += "/"
	}
	result := &RenameFolderOutput{}
	if actionErr := obsClient.doActionWithBucketAndKey("RenameFolder", HTTP_POST, input.Bucket, input.Key, input, result, extensions); actionErr != nil {
		return nil, actionErr
	}
	return result, nil
}

// SetDirAccesslabel sets the access label of a directory.
func (obsClient ObsClient) SetDirAccesslabel(input *SetDirAccesslabelInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("SetDirAccesslabelInput is nil")
	}
	result := &BaseModel{}
	if actionErr := obsClient.doActionWithBucketAndKeyV2("SetDirAccesslabel", HTTP_PUT, input.Bucket, input.Key, input, result, extensions); actionErr != nil {
		return nil, actionErr
	}
	return result, nil
}

// GetDirAccesslabel gets the access label of a directory.
func (obsClient ObsClient) GetDirAccesslabel(input *GetDirAccesslabelInput, extensions ...extensionOptions) (output *GetDirAccesslabelOutput, err error) {
	if input == nil {
		return nil, errors.New("GetDirAccesslabelInput is nil")
	}
	result := &GetDirAccesslabelOutput{}
	if actionErr := obsClient.doActionWithBucketAndKeyV2("GetDirAccesslabel", HTTP_GET, input.Bucket, input.Key, input, result, extensions); actionErr != nil {
		return nil, actionErr
	}
	return result, nil
}

// DeleteDirAccesslabel deletes the access label of a directory.
func (obsClient ObsClient) DeleteDirAccesslabel(input *DeleteDirAccesslabelInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("DeleteDirAccesslabelInput is nil")
	}
	result := &BaseModel{}
	if actionErr := obsClient.doActionWithBucketAndKeyV2("DeleteDirAccesslabel", HTTP_DELETE, input.Bucket, input.Key, input, result, extensions); actionErr != nil {
		return nil, actionErr
	}
	return result, nil
}

+ 49
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_other.go View File

@@ -0,0 +1,49 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"strings"
)

// Refresh refreshes ak, sk and securityToken for obsClient.
func (obsClient ObsClient) Refresh(ak, sk, securityToken string) {
	// Only the first BasicSecurityProvider in the chain is refreshed.
	for _, provider := range obsClient.conf.securityProviders {
		basic, ok := provider.(*BasicSecurityProvider)
		if !ok {
			continue
		}
		basic.refresh(strings.TrimSpace(ak), strings.TrimSpace(sk), strings.TrimSpace(securityToken))
		break
	}
}

// getSecurity returns the first complete ak/sk pair offered by the configured
// security providers, or emptySecurityHolder when none supplies one.
func (obsClient ObsClient) getSecurity() securityHolder {
	for _, provider := range obsClient.conf.securityProviders {
		if provider == nil {
			continue
		}
		if sh := provider.getSecurity(); sh.ak != "" && sh.sk != "" {
			return sh
		}
	}
	return emptySecurityHolder
}

// Close closes ObsClient and releases idle pooled connections.
//
// Fix: guard the transport and conf dereferences — conf.transport can be nil
// when the client was configured with a custom RoundTripper, and a second
// Close call would otherwise panic on the nil conf.
func (obsClient *ObsClient) Close() {
	obsClient.httpClient = nil
	if obsClient.conf != nil {
		if obsClient.conf.transport != nil {
			obsClient.conf.transport.CloseIdleConnections()
		}
		obsClient.conf = nil
	}
}

+ 257
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_part.go View File

@@ -0,0 +1,257 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"errors"
"io"
"os"
"sort"
"strings"
)

// ListMultipartUploads lists the multipart uploads.
//
// You can use this API to list the multipart uploads that are initialized but not combined or aborted in a specified bucket.
func (obsClient ObsClient) ListMultipartUploads(input *ListMultipartUploadsInput, extensions ...extensionOptions) (output *ListMultipartUploadsOutput, err error) {
	if input == nil {
		return nil, errors.New("ListMultipartUploadsInput is nil")
	}
	result := &ListMultipartUploadsOutput{}
	if actionErr := obsClient.doActionWithBucket("ListMultipartUploads", HTTP_GET, input.Bucket, input, result, extensions); actionErr != nil {
		return nil, actionErr
	}
	// URL-encoded listings must be decoded before being handed to the caller.
	if result.EncodingType == "url" {
		if decodeErr := decodeListMultipartUploadsOutput(result); decodeErr != nil {
			doLog(LEVEL_ERROR, "Failed to get ListMultipartUploadsOutput with error: %v.", decodeErr)
			return nil, decodeErr
		}
	}
	return result, nil
}

// AbortMultipartUpload aborts a multipart upload in a specified bucket by using the multipart upload ID.
func (obsClient ObsClient) AbortMultipartUpload(input *AbortMultipartUploadInput, extensions ...extensionOptions) (output *BaseModel, err error) {
	if input == nil {
		return nil, errors.New("AbortMultipartUploadInput is nil")
	}
	if input.UploadId == "" {
		return nil, errors.New("UploadId is empty")
	}
	result := &BaseModel{}
	if actionErr := obsClient.doActionWithBucketAndKey("AbortMultipartUpload", HTTP_DELETE, input.Bucket, input.Key, input, result, extensions); actionErr != nil {
		return nil, actionErr
	}
	return result, nil
}

// InitiateMultipartUpload initializes a multipart upload.
func (obsClient ObsClient) InitiateMultipartUpload(input *InitiateMultipartUploadInput, extensions ...extensionOptions) (output *InitiateMultipartUploadOutput, err error) {
	if input == nil {
		return nil, errors.New("InitiateMultipartUploadInput is nil")
	}

	// Derive a Content-Type from the key suffix when the caller gave none.
	if input.ContentType == "" && input.Key != "" {
		suffix := strings.ToLower(input.Key[strings.LastIndex(input.Key, ".")+1:])
		if contentType, ok := mimeTypes[suffix]; ok {
			input.ContentType = contentType
		}
	}

	result := &InitiateMultipartUploadOutput{}
	if actionErr := obsClient.doActionWithBucketAndKey("InitiateMultipartUpload", HTTP_POST, input.Bucket, input.Key, input, result, extensions); actionErr != nil {
		return nil, actionErr
	}
	ParseInitiateMultipartUploadOutput(result)
	// URL-encoded results must be decoded before being handed to the caller.
	if result.EncodingType == "url" {
		if decodeErr := decodeInitiateMultipartUploadOutput(result); decodeErr != nil {
			doLog(LEVEL_ERROR, "Failed to get InitiateMultipartUploadOutput with error: %v.", decodeErr)
			return nil, decodeErr
		}
	}
	return result, nil
}

// UploadPart uploads a part to a specified bucket by using a specified multipart upload ID.
//
// After a multipart upload is initialized, you can use this API to upload a part to a specified bucket
// by using the multipart upload ID. Except for the last uploaded part whose size ranges from 0 to 5 GB,
// sizes of the other parts range from 100 KB to 5 GB. The upload part ID ranges from 1 to 10000.
func (obsClient ObsClient) UploadPart(_input *UploadPartInput, extensions ...extensionOptions) (output *UploadPartOutput, err error) {
	if _input == nil {
		return nil, errors.New("UploadPartInput is nil")
	}

	if _input.UploadId == "" {
		return nil, errors.New("UploadId is empty")
	}

	// Work on a field-by-field copy so the caller's input is never mutated.
	input := &UploadPartInput{}
	input.Bucket = _input.Bucket
	input.Key = _input.Key
	input.PartNumber = _input.PartNumber
	input.UploadId = _input.UploadId
	input.ContentMD5 = _input.ContentMD5
	input.ContentSHA256 = _input.ContentSHA256
	input.SourceFile = _input.SourceFile
	input.Offset = _input.Offset
	input.PartSize = _input.PartSize
	input.SseHeader = _input.SseHeader
	input.Body = _input.Body

	output = &UploadPartOutput{}
	var repeatable bool
	if input.Body != nil {
		// A caller-supplied Body takes precedence over SourceFile.
		// Only a strings.Reader can safely be re-sent on retry.
		if _, ok := input.Body.(*strings.Reader); ok {
			repeatable = true
		}
		if _, ok := input.Body.(*readerWrapper); !ok && input.PartSize > 0 {
			input.Body = &readerWrapper{reader: input.Body, totalCount: input.PartSize}
		}
	} else if sourceFile := strings.TrimSpace(input.SourceFile); sourceFile != "" {
		fd, _err := os.Open(sourceFile)
		if _err != nil {
			err = _err
			return nil, err
		}
		// Closed when UploadPart returns; the body is fully consumed by then.
		defer func() {
			errMsg := fd.Close()
			if errMsg != nil {
				doLog(LEVEL_WARN, "Failed to close file with reason: %v", errMsg)
			}
		}()

		stat, _err := fd.Stat()
		if _err != nil {
			err = _err
			return nil, err
		}
		fileSize := stat.Size()
		fileReaderWrapper := &fileReaderWrapper{filePath: sourceFile}
		fileReaderWrapper.reader = fd

		// An out-of-range offset silently falls back to the file start.
		if input.Offset < 0 || input.Offset > fileSize {
			input.Offset = 0
		}

		// A missing or oversized part size is clamped to the remaining bytes.
		if input.PartSize <= 0 || input.PartSize > (fileSize-input.Offset) {
			input.PartSize = fileSize - input.Offset
		}
		fileReaderWrapper.totalCount = input.PartSize
		fileReaderWrapper.mark = input.Offset
		if _, err = fd.Seek(input.Offset, io.SeekStart); err != nil {
			return nil, err
		}
		input.Body = fileReaderWrapper
		// A file-backed body can be rewound, so the request may be retried.
		repeatable = true
	}
	if repeatable {
		err = obsClient.doActionWithBucketAndKey("UploadPart", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
	} else {
		err = obsClient.doActionWithBucketAndKeyUnRepeatable("UploadPart", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
	}
	if err != nil {
		output = nil
	} else {
		ParseUploadPartOutput(output)
		output.PartNumber = input.PartNumber
	}
	return
}

// CompleteMultipartUpload combines the uploaded parts in a specified bucket by using the multipart upload ID.
func (obsClient ObsClient) CompleteMultipartUpload(input *CompleteMultipartUploadInput, extensions ...extensionOptions) (output *CompleteMultipartUploadOutput, err error) {
	if input == nil {
		return nil, errors.New("CompleteMultipartUploadInput is nil")
	}
	if input.UploadId == "" {
		return nil, errors.New("UploadId is empty")
	}

	// The service requires parts listed in ascending part-number order.
	var parts partSlice = input.Parts
	sort.Sort(parts)

	result := &CompleteMultipartUploadOutput{}
	if actionErr := obsClient.doActionWithBucketAndKey("CompleteMultipartUpload", HTTP_POST, input.Bucket, input.Key, input, result, extensions); actionErr != nil {
		return nil, actionErr
	}
	ParseCompleteMultipartUploadOutput(result)
	// URL-encoded results must be decoded before being handed to the caller.
	if result.EncodingType == "url" {
		if decodeErr := decodeCompleteMultipartUploadOutput(result); decodeErr != nil {
			doLog(LEVEL_ERROR, "Failed to get CompleteMultipartUploadOutput with error: %v.", decodeErr)
			return nil, decodeErr
		}
	}
	return result, nil
}

// ListParts lists the uploaded parts in a bucket by using the multipart upload ID.
func (obsClient ObsClient) ListParts(input *ListPartsInput, extensions ...extensionOptions) (output *ListPartsOutput, err error) {
	if input == nil {
		return nil, errors.New("ListPartsInput is nil")
	}
	if input.UploadId == "" {
		return nil, errors.New("UploadId is empty")
	}
	result := &ListPartsOutput{}
	if actionErr := obsClient.doActionWithBucketAndKey("ListParts", HTTP_GET, input.Bucket, input.Key, input, result, extensions); actionErr != nil {
		return nil, actionErr
	}
	// URL-encoded listings must be decoded before being handed to the caller.
	if result.EncodingType == "url" {
		if decodeErr := decodeListPartsOutput(result); decodeErr != nil {
			doLog(LEVEL_ERROR, "Failed to get ListPartsOutput with error: %v.", decodeErr)
			return nil, decodeErr
		}
	}
	return result, nil
}

// CopyPart copy a part to a specified bucket by using a specified multipart upload ID.
//
// After a multipart upload is initialized, you can use this API to copy a part to a specified bucket by using the multipart upload ID.
func (obsClient ObsClient) CopyPart(input *CopyPartInput, extensions ...extensionOptions) (output *CopyPartOutput, err error) {
	if input == nil {
		return nil, errors.New("CopyPartInput is nil")
	}
	if input.UploadId == "" {
		return nil, errors.New("UploadId is empty")
	}
	// The copy source must be fully specified and any range well-formed.
	if strings.TrimSpace(input.CopySourceBucket) == "" {
		return nil, errors.New("Source bucket is empty")
	}
	if strings.TrimSpace(input.CopySourceKey) == "" {
		return nil, errors.New("Source key is empty")
	}
	if input.CopySourceRange != "" && !strings.HasPrefix(input.CopySourceRange, "bytes=") {
		return nil, errors.New("Source Range should start with [bytes=]")
	}

	result := &CopyPartOutput{}
	if actionErr := obsClient.doActionWithBucketAndKey("CopyPart", HTTP_PUT, input.Bucket, input.Key, input, result, extensions); actionErr != nil {
		return nil, actionErr
	}
	ParseCopyPartOutput(result)
	result.PartNumber = input.PartNumber
	return result, nil
}

+ 59
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/client_resume.go View File

@@ -0,0 +1,59 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
	"errors"
)

// UploadFile resume uploads.
//
// This API is an encapsulated and enhanced version of multipart upload, and aims to eliminate large file
// upload failures caused by poor network conditions and program breakdowns.
//
// Fix: validate input against nil like the other client APIs; previously a
// nil input panicked on the input.EnableCheckpoint dereference.
func (obsClient ObsClient) UploadFile(input *UploadFileInput, extensions ...extensionOptions) (output *CompleteMultipartUploadOutput, err error) {
	if input == nil {
		return nil, errors.New("UploadFileInput is nil")
	}
	// Default the checkpoint file to sit next to the source file.
	if input.EnableCheckpoint && input.CheckpointFile == "" {
		input.CheckpointFile = input.UploadFile + ".uploadfile_record"
	}

	if input.TaskNum <= 0 {
		input.TaskNum = 1
	}
	// Clamp the part size into the service-accepted range.
	if input.PartSize < MIN_PART_SIZE {
		input.PartSize = MIN_PART_SIZE
	} else if input.PartSize > MAX_PART_SIZE {
		input.PartSize = MAX_PART_SIZE
	}

	output, err = obsClient.resumeUpload(input, extensions)
	return
}

// DownloadFile resume downloads.
//
// This API is an encapsulated and enhanced version of partial download, and aims to eliminate large file
// download failures caused by poor network conditions and program breakdowns.
//
// Fix: validate input against nil like the other client APIs; previously a
// nil input panicked on the input.DownloadFile dereference.
func (obsClient ObsClient) DownloadFile(input *DownloadFileInput, extensions ...extensionOptions) (output *GetObjectMetadataOutput, err error) {
	if input == nil {
		return nil, errors.New("DownloadFileInput is nil")
	}
	// Default the local target to the object key.
	if input.DownloadFile == "" {
		input.DownloadFile = input.Key
	}

	// Default the checkpoint file to sit next to the download target.
	if input.EnableCheckpoint && input.CheckpointFile == "" {
		input.CheckpointFile = input.DownloadFile + ".downloadfile_record"
	}

	if input.TaskNum <= 0 {
		input.TaskNum = 1
	}
	if input.PartSize <= 0 {
		input.PartSize = DEFAULT_PART_SIZE
	}

	output, err = obsClient.resumeDownload(input, extensions)
	return
}

+ 563
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/conf.go View File

@@ -0,0 +1,563 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net"
"net/http"
"net/url"
"os"
"sort"
"strconv"
"strings"
"sync"
"time"

"golang.org/x/net/http/httpproxy"
)

// urlHolder keeps the parsed pieces of the configured endpoint.
type urlHolder struct {
	scheme string // "http" or "https"
	host   string // host name or IP, without port
	port   int    // explicit port, or 443/80 derived from scheme
}

// config holds every tunable of an ObsClient; values are set by the
// With* configurers and normalized by prepareConfig/initConfigWithDefault.
type config struct {
	securityProviders []securityProvider
	urlHolder         *urlHolder
	pathStyle         bool
	cname             bool // endpoint is a custom domain name
	sslVerify         bool
	disableKeepAlive  bool
	endpoint          string
	signature         SignatureType
	region            string
	connectTimeout    int // seconds
	socketTimeout     int // seconds
	headerTimeout     int // seconds
	idleConnTimeout   int // seconds
	finalTimeout      int // derived: socketTimeout * 10 (see prepareConfig)
	maxRetryCount     int
	proxyURL          string
	noProxyURL        string
	proxyFromEnv      bool
	maxConnsPerHost   int
	pemCerts          []byte // custom CA bundle used when sslVerify is on
	transport         *http.Transport
	roundTripper      http.RoundTripper
	httpClient        *http.Client
	ctx               context.Context
	maxRedirectCount  int
	userAgent         string
	enableCompression bool
	progressListener  ProgressListener
	// Lazily-built proxy resolver for proxyURL/noProxyURL (see customProxyFunc).
	customProxyOnce      sync.Once
	customProxyFuncValue func(*url.URL) (*url.URL, error)
}

// String renders the non-sensitive configuration fields for logging.
func (conf config) String() string {
	const layout = "[endpoint:%s, signature:%s, pathStyle:%v, region:%s," +
		"\nconnectTimeout:%d, socketTimeout:%d, headerTimeout:%d, idleConnTimeout:%d," +
		"\nmaxRetryCount:%d, maxConnsPerHost:%d, sslVerify:%v, maxRedirectCount:%d," +
		"\ncname:%v, userAgent:%s, disableKeepAlive:%v, proxyFromEnv:%v]"
	return fmt.Sprintf(layout,
		conf.endpoint, conf.signature, conf.pathStyle, conf.region,
		conf.connectTimeout, conf.socketTimeout, conf.headerTimeout, conf.idleConnTimeout,
		conf.maxRetryCount, conf.maxConnsPerHost, conf.sslVerify, conf.maxRedirectCount,
		conf.cname, conf.userAgent, conf.disableKeepAlive, conf.proxyFromEnv,
	)
}

// configurer applies a single option to a client config; the With*
// constructors below each build one.
type configurer func(conf *config)

// WithSecurityProviders is a configurer for ObsClient that appends the
// given non-nil security providers to the config.
func WithSecurityProviders(sps ...securityProvider) configurer {
	return func(c *config) {
		for _, provider := range sps {
			if provider == nil {
				continue
			}
			c.securityProviders = append(c.securityProviders, provider)
		}
	}
}

// WithSslVerify is a wrapper for WithSslVerifyAndPemCerts that toggles
// server-certificate verification without supplying custom CA certs.
func WithSslVerify(sslVerify bool) configurer {
	return WithSslVerifyAndPemCerts(sslVerify, nil)
}

// WithSslVerifyAndPemCerts is a configurer for ObsClient to set
// conf.sslVerify together with an optional PEM-encoded CA bundle.
func WithSslVerifyAndPemCerts(sslVerify bool, pemCerts []byte) configurer {
	return func(c *config) {
		c.sslVerify = sslVerify
		c.pemCerts = pemCerts
	}
}

// WithHeaderTimeout is a configurer for ObsClient to set the timeout
// period (seconds) for receiving the response headers.
func WithHeaderTimeout(headerTimeout int) configurer {
	return func(c *config) {
		c.headerTimeout = headerTimeout
	}
}

// WithProxyUrl is a configurer for ObsClient to set an explicit HTTP proxy.
func WithProxyUrl(proxyURL string) configurer {
	return func(c *config) {
		c.proxyURL = proxyURL
	}
}

// WithNoProxyUrl is a configurer for ObsClient to set the HTTP no_proxy list.
func WithNoProxyUrl(noProxyURL string) configurer {
	return func(c *config) {
		c.noProxyURL = noProxyURL
	}
}

// WithProxyFromEnv is a configurer for ObsClient to get the proxy from the
// process environment.
func WithProxyFromEnv(proxyFromEnv bool) configurer {
	return func(conf *config) {
		conf.proxyFromEnv = proxyFromEnv
	}
}

// WithMaxConnections is a configurer for ObsClient to set the maximum
// number of idle HTTP connections per host.
func WithMaxConnections(maxConnsPerHost int) configurer {
	return func(c *config) {
		c.maxConnsPerHost = maxConnsPerHost
	}
}

// WithPathStyle is a configurer for ObsClient to enable path-style addressing.
func WithPathStyle(pathStyle bool) configurer {
	return func(c *config) {
		c.pathStyle = pathStyle
	}
}

// WithSignature is a configurer for ObsClient to choose the signing algorithm.
func WithSignature(signature SignatureType) configurer {
	return func(c *config) {
		c.signature = signature
	}
}

// WithRegion is a configurer for ObsClient to set the service region.
func WithRegion(region string) configurer {
	return func(c *config) {
		c.region = region
	}
}

// WithConnectTimeout is a configurer for ObsClient to set the timeout
// period for establishing an http/https connection, in seconds.
func WithConnectTimeout(connectTimeout int) configurer {
	return func(c *config) {
		c.connectTimeout = connectTimeout
	}
}

// WithSocketTimeout is a configurer for ObsClient to set the timeout
// duration for transmitting data at the socket layer, in seconds.
func WithSocketTimeout(socketTimeout int) configurer {
	return func(c *config) {
		c.socketTimeout = socketTimeout
	}
}

// WithIdleConnTimeout is a configurer for ObsClient to set the timeout
// period of an idle HTTP connection in the connection pool, in seconds.
func WithIdleConnTimeout(idleConnTimeout int) configurer {
	return func(c *config) {
		c.idleConnTimeout = idleConnTimeout
	}
}

// WithMaxRetryCount is a configurer for ObsClient to set the maximum number
// of retries when an HTTP/HTTPS connection is abnormal.
func WithMaxRetryCount(maxRetryCount int) configurer {
	return func(c *config) {
		c.maxRetryCount = maxRetryCount
	}
}

// WithSecurityToken is a configurer for ObsClient that injects the security
// token of temporary credentials into the first BasicSecurityProvider found,
// keeping its existing AK/SK.
func WithSecurityToken(securityToken string) configurer {
	return func(c *config) {
		for _, sp := range c.securityProviders {
			bsp, ok := sp.(*BasicSecurityProvider)
			if !ok {
				continue
			}
			sh := bsp.getSecurity()
			bsp.refresh(sh.ak, sh.sk, securityToken)
			return
		}
	}
}

// WithHttpTransport is a configurer for ObsClient to supply a customized
// http.Transport.
func WithHttpTransport(transport *http.Transport) configurer {
	return func(c *config) {
		c.transport = transport
	}
}

// WithHttpClient is a configurer for ObsClient to set a customized http.Client.
func WithHttpClient(httpClient *http.Client) configurer {
	return func(conf *config) {
		conf.httpClient = httpClient
	}
}

// WithRequestContext is a configurer for ObsClient to set the context
// attached to each HTTP request.
func WithRequestContext(ctx context.Context) configurer {
	return func(c *config) {
		c.ctx = ctx
	}
}

// WithCustomDomainName is a configurer for ObsClient to mark the endpoint
// as a custom domain name (cname).
func WithCustomDomainName(cname bool) configurer {
	return func(c *config) {
		c.cname = cname
	}
}

// WithDisableKeepAlive is a configurer for ObsClient to disable HTTP keep-alive.
func WithDisableKeepAlive(disableKeepAlive bool) configurer {
	return func(c *config) {
		c.disableKeepAlive = disableKeepAlive
	}
}

// WithMaxRedirectCount is a configurer for ObsClient to set the maximum
// number of times a request may be redirected.
func WithMaxRedirectCount(maxRedirectCount int) configurer {
	return func(c *config) {
		c.maxRedirectCount = maxRedirectCount
	}
}

// WithUserAgent is a configurer for ObsClient to override the User-Agent header.
func WithUserAgent(userAgent string) configurer {
	return func(c *config) {
		c.userAgent = userAgent
	}
}

// WithEnableCompression is a configurer for ObsClient controlling
// Transport.DisableCompression (inverted when the transport is built).
func WithEnableCompression(enableCompression bool) configurer {
	return func(c *config) {
		c.enableCompression = enableCompression
	}
}

// prepareConfig fills every unset tuning field with its default and derives
// finalTimeout from socketTimeout.
func (conf *config) prepareConfig() {
	defaultIfNotPositive := func(v *int, def int) {
		if *v <= 0 {
			*v = def
		}
	}
	defaultIfNegative := func(v *int, def int) {
		if *v < 0 {
			*v = def
		}
	}

	defaultIfNotPositive(&conf.connectTimeout, DEFAULT_CONNECT_TIMEOUT)
	defaultIfNotPositive(&conf.socketTimeout, DEFAULT_SOCKET_TIMEOUT)
	// finalTimeout must be derived after socketTimeout has been defaulted.
	conf.finalTimeout = conf.socketTimeout * 10
	defaultIfNotPositive(&conf.headerTimeout, DEFAULT_HEADER_TIMEOUT)
	defaultIfNegative(&conf.idleConnTimeout, DEFAULT_IDLE_CONN_TIMEOUT)
	defaultIfNegative(&conf.maxRetryCount, DEFAULT_MAX_RETRY_COUNT)
	defaultIfNotPositive(&conf.maxConnsPerHost, DEFAULT_MAX_CONN_PER_HOST)
	defaultIfNegative(&conf.maxRedirectCount, DEFAULT_MAX_REDIRECT_COUNT)

	// OBS signing is not combined with path-style addressing; fall back to v2.
	if conf.pathStyle && conf.signature == SignatureObs {
		conf.signature = SignatureV2
	}
}

// initConfigWithDefault validates and normalizes the endpoint, splits it
// into scheme/host/port, applies defaults for signature and region, and
// finishes by calling prepareConfig. Returns an error only when no
// endpoint was configured.
func (conf *config) initConfigWithDefault() error {
	conf.endpoint = strings.TrimSpace(conf.endpoint)
	if conf.endpoint == "" {
		return errors.New("endpoint is not set")
	}

	// Drop any query string accidentally included in the endpoint.
	if index := strings.Index(conf.endpoint, "?"); index > 0 {
		conf.endpoint = conf.endpoint[:index]
	}

	// Strip all trailing slashes.
	for strings.LastIndex(conf.endpoint, "/") == len(conf.endpoint)-1 {
		conf.endpoint = conf.endpoint[:len(conf.endpoint)-1]
	}

	if conf.signature == "" {
		conf.signature = DEFAULT_SIGNATURE
	}

	// Split scheme from the address; https is assumed when no scheme given.
	urlHolder := &urlHolder{}
	var address string
	if strings.HasPrefix(conf.endpoint, "https://") {
		urlHolder.scheme = "https"
		address = conf.endpoint[len("https://"):]
	} else if strings.HasPrefix(conf.endpoint, "http://") {
		urlHolder.scheme = "http"
		address = conf.endpoint[len("http://"):]
	} else {
		urlHolder.scheme = "https"
		address = conf.endpoint
	}

	// Parse an optional ":port" suffix; a non-numeric port is silently
	// ignored and the scheme default (443/80) is used instead.
	addr := strings.Split(address, ":")
	if len(addr) == 2 {
		if port, err := strconv.Atoi(addr[1]); err == nil {
			urlHolder.port = port
		}
	}
	urlHolder.host = addr[0]
	if urlHolder.port == 0 {
		if urlHolder.scheme == "https" {
			urlHolder.port = 443
		} else {
			urlHolder.port = 80
		}
	}

	// Virtual-host addressing cannot be used with a bare IP endpoint.
	if IsIP(urlHolder.host) {
		conf.pathStyle = true
	}

	conf.urlHolder = urlHolder

	conf.region = strings.TrimSpace(conf.region)
	if conf.region == "" {
		conf.region = DEFAULT_REGION
	}

	conf.prepareConfig()
	conf.proxyURL = strings.TrimSpace(conf.proxyURL)
	return nil
}

// getTransport lazily builds the http.Transport used by the client when the
// caller did not inject one via WithHttpTransport. A caller-supplied
// transport is left untouched (proxy/TLS settings are NOT applied to it).
func (conf *config) getTransport() error {
	if conf.transport == nil {
		conf.transport = &http.Transport{
			// Custom dialer: enforce the connect timeout, then wrap the
			// connection via getConnDelegate — presumably to apply
			// socketTimeout/finalTimeout as I/O deadlines (defined
			// elsewhere; confirm in the delegate's source).
			Dial: func(network, addr string) (net.Conn, error) {
				conn, err := net.DialTimeout(network, addr, time.Second*time.Duration(conf.connectTimeout))
				if err != nil {
					return nil, err
				}
				return getConnDelegate(conn, conf.socketTimeout, conf.finalTimeout), nil
			},
			MaxIdleConns:          conf.maxConnsPerHost,
			MaxIdleConnsPerHost:   conf.maxConnsPerHost,
			ResponseHeaderTimeout: time.Second * time.Duration(conf.headerTimeout),
			IdleConnTimeout:       time.Second * time.Duration(conf.idleConnTimeout),
			DisableKeepAlives:     conf.disableKeepAlive,
		}
		// An explicit proxy URL wins over environment-derived proxying.
		if conf.proxyURL != "" {
			conf.transport.Proxy = conf.customProxyFromEnvironment
		} else if conf.proxyFromEnv {
			conf.transport.Proxy = http.ProxyFromEnvironment
		}

		// TLS: verification is skipped when sslVerify is false; a custom CA
		// bundle is honored only when verification is enabled.
		tlsConfig := &tls.Config{InsecureSkipVerify: !conf.sslVerify}
		if conf.sslVerify && conf.pemCerts != nil {
			pool := x509.NewCertPool()
			pool.AppendCertsFromPEM(conf.pemCerts)
			tlsConfig.RootCAs = pool
		}

		conf.transport.TLSClientConfig = tlsConfig
		conf.transport.DisableCompression = !conf.enableCompression
	}

	return nil
}

// customProxyFromEnvironment resolves the proxy for req from the
// client-configured proxy settings rather than the process environment.
func (conf *config) customProxyFromEnvironment(req *http.Request) (*url.URL, error) {
	return conf.customProxyFunc()(req.URL)
}

// customProxyFunc builds (once) and returns the proxy-resolution function
// derived from conf.proxyURL / conf.noProxyURL.
func (conf *config) customProxyFunc() func(*url.URL) (*url.URL, error) {
	conf.customProxyOnce.Do(func() {
		cfg := &httpproxy.Config{
			HTTPProxy:  conf.proxyURL,
			HTTPSProxy: conf.proxyURL,
			NoProxy:    conf.noProxyURL,
			CGI:        os.Getenv("REQUEST_METHOD") != "",
		}
		conf.customProxyFuncValue = cfg.ProxyFunc()
	})
	return conf.customProxyFuncValue
}

func checkRedirectFunc(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}

// DummyQueryEscape returns the input string unchanged; used where no
// escaping must be applied.
func DummyQueryEscape(s string) string {
	return s
}

// prepareBaseURL builds the scheme://host:port request URL (virtual-host,
// path-style, or cname form) and the matching canonicalized resource
// string used for signing.
// NOTE(review): signature is compared against the literals "v4"/"v2"/"OBS"
// here, while other code compares against SignatureType constants — verify
// the constants' values match these literals.
func (conf *config) prepareBaseURL(bucketName string) (requestURL string, canonicalizedURL string) {
	urlHolder := conf.urlHolder
	if conf.cname {
		// Custom domain: the host itself identifies the bucket.
		requestURL = fmt.Sprintf("%s://%s:%d", urlHolder.scheme, urlHolder.host, urlHolder.port)
		if conf.signature == "v4" {
			canonicalizedURL = "/"
		} else {
			canonicalizedURL = "/" + urlHolder.host + "/"
		}
	} else {
		if bucketName == "" {
			// Service-level request (e.g. list buckets).
			requestURL = fmt.Sprintf("%s://%s:%d", urlHolder.scheme, urlHolder.host, urlHolder.port)
			canonicalizedURL = "/"
		} else {
			if conf.pathStyle {
				requestURL = fmt.Sprintf("%s://%s:%d/%s", urlHolder.scheme, urlHolder.host, urlHolder.port, bucketName)
				canonicalizedURL = "/" + bucketName
			} else {
				// Virtual-host style: bucket becomes a subdomain.
				requestURL = fmt.Sprintf("%s://%s.%s:%d", urlHolder.scheme, bucketName, urlHolder.host, urlHolder.port)
				if conf.signature == "v2" || conf.signature == "OBS" {
					canonicalizedURL = "/" + bucketName + "/"
				} else {
					canonicalizedURL = "/"
				}
			}
		}
	}
	return
}

// prepareObjectKey encodes objectKey for use in a request URL. With
// escape=true each rune is query-escaped, except that "/" is kept verbatim
// and spaces are path-escaped ("%20" rather than "+"); with escape=false
// the supplied escapeFunc is applied to the whole key.
func (conf *config) prepareObjectKey(escape bool, objectKey string, escapeFunc func(s string) string) (encodeObjectKey string) {
	if !escape {
		return escapeFunc(objectKey)
	}
	var sb strings.Builder
	for _, r := range objectKey {
		switch s := string(r); s {
		case "/":
			sb.WriteString(s)
		case " ":
			sb.WriteString(url.PathEscape(s))
		default:
			sb.WriteString(url.QueryEscape(s))
		}
	}
	return sb.String()
}

// prepareEscapeFunc picks the escaping strategy for object keys:
// url.QueryEscape when escaping is requested, identity otherwise.
func (conf *config) prepareEscapeFunc(escape bool) (escapeFunc func(s string) string) {
	if !escape {
		return DummyQueryEscape
	}
	return url.QueryEscape
}

// formatUrls builds the full request URL and the canonicalized resource
// string used for signing, from bucket name, object key and query
// parameters. escape controls whether the object key is percent-encoded
// rune by rune (see prepareObjectKey).
func (conf *config) formatUrls(bucketName, objectKey string, params map[string]string, escape bool) (requestURL string, canonicalizedURL string) {
	requestURL, canonicalizedURL = conf.prepareBaseURL(bucketName)
	escapeFunc := conf.prepareEscapeFunc(escape)

	if objectKey != "" {
		encodeObjectKey := conf.prepareObjectKey(escape, objectKey, escapeFunc)
		requestURL += "/" + encodeObjectKey
		if !strings.HasSuffix(canonicalizedURL, "/") {
			canonicalizedURL += "/"
		}
		canonicalizedURL += encodeObjectKey
	}

	// Append query parameters in sorted key order so the canonicalized URL
	// (and thus the signature) is deterministic.
	keys := make([]string, 0, len(params))
	for key := range params {
		keys = append(keys, strings.TrimSpace(key))
	}
	sort.Strings(keys)

	i := 0 // number of parameters included in the canonicalized URL
	for index, key := range keys {
		if index == 0 {
			requestURL += "?"
		} else {
			requestURL += "&"
		}
		_key := url.QueryEscape(key)
		requestURL += _key

		_value := params[key]
		if conf.signature == "v4" {
			requestURL += "=" + url.QueryEscape(_value)
		} else {
			if _value != "" {
				requestURL += "=" + url.QueryEscape(_value)
				_value = "=" + _value
			} else {
				_value = ""
			}
			// For v2/OBS signing only sub-resource parameters and
			// vendor-prefixed parameters enter the canonicalized URL.
			lowerKey := strings.ToLower(key)
			_, ok := allowedResourceParameterNames[lowerKey]
			prefixHeader := HEADER_PREFIX
			if conf.signature == SignatureObs {
				prefixHeader = HEADER_PREFIX_OBS
			}
			ok = ok || strings.HasPrefix(lowerKey, prefixHeader)
			if ok {
				if i == 0 {
					canonicalizedURL += "?"
				} else {
					canonicalizedURL += "&"
				}
				canonicalizedURL += getQueryURL(_key, _value)
				i++
			}
		}
	}
	return
}

// getQueryURL joins an escaped parameter name with its (possibly empty,
// "="-prefixed) value fragment.
func getQueryURL(key, value string) string {
	return key + value
}

// getProgressListener returns the listener from the first progress-listener
// extension option, or nil when none was supplied.
func (obsClient ObsClient) getProgressListener(extensions []extensionOptions) ProgressListener {
	for _, ext := range extensions {
		configure, ok := ext.(extensionProgressListener)
		if !ok {
			continue
		}
		return configure()
	}
	return nil
}

+ 329
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/const.go View File

@@ -0,0 +1,329 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

// Protocol-level constants: vendor header prefixes, header and query
// parameter names, client tuning defaults, signing markers, and size limits.
const (
	// SDK identity and vendor header prefixes.
	OBS_SDK_VERSION = "3.25.9"
	USER_AGENT = "obs-sdk-go/" + OBS_SDK_VERSION
	HEADER_PREFIX = "x-amz-"
	HEADER_PREFIX_META = "x-amz-meta-"
	HEADER_PREFIX_OBS = "x-obs-"
	HEADER_PREFIX_META_OBS = "x-obs-meta-"
	HEADER_DATE_AMZ = "x-amz-date"
	HEADER_DATE_OBS = "x-obs-date"
	HEADER_STS_TOKEN_AMZ = "x-amz-security-token"
	HEADER_STS_TOKEN_OBS = "x-obs-security-token"
	HEADER_ACCESSS_KEY_AMZ = "AWSAccessKeyId"
	PREFIX_META = "meta-"

	// Request/response header names, mostly in their prefix-stripped,
	// lower-cased form (see cleanHeaderPrefix).
	HEADER_CONTENT_SHA256_AMZ = "x-amz-content-sha256"
	HEADER_ACL_AMZ = "x-amz-acl"
	HEADER_ACL_OBS = "x-obs-acl"
	HEADER_ACL = "acl"
	HEADER_LOCATION_AMZ = "location"
	HEADER_BUCKET_LOCATION_OBS = "bucket-location"
	HEADER_COPY_SOURCE = "copy-source"
	HEADER_COPY_SOURCE_RANGE = "copy-source-range"
	HEADER_RANGE = "Range"
	HEADER_STORAGE_CLASS = "x-default-storage-class"
	HEADER_STORAGE_CLASS_OBS = "x-obs-storage-class"
	HEADER_FS_FILE_INTERFACE_OBS = "x-obs-fs-file-interface"
	HEADER_MODE = "mode"
	HEADER_VERSION_OBS = "version"
	HEADER_REQUEST_PAYER = "x-amz-request-payer"
	HEADER_GRANT_READ_OBS = "grant-read"
	HEADER_GRANT_WRITE_OBS = "grant-write"
	HEADER_GRANT_READ_ACP_OBS = "grant-read-acp"
	HEADER_GRANT_WRITE_ACP_OBS = "grant-write-acp"
	HEADER_GRANT_FULL_CONTROL_OBS = "grant-full-control"
	HEADER_GRANT_READ_DELIVERED_OBS = "grant-read-delivered"
	HEADER_GRANT_FULL_CONTROL_DELIVERED_OBS = "grant-full-control-delivered"
	HEADER_REQUEST_ID = "request-id"
	HEADER_ERROR_CODE = "error-code"
	HEADER_ERROR_INDICATOR = "x-reserved-indicator"
	HEADER_ERROR_MESSAGE = "error-message"
	HEADER_BUCKET_REGION = "bucket-region"
	HEADER_ACCESS_CONRTOL_ALLOW_ORIGIN = "access-control-allow-origin"
	HEADER_ACCESS_CONRTOL_ALLOW_HEADERS = "access-control-allow-headers"
	HEADER_ACCESS_CONRTOL_MAX_AGE = "access-control-max-age"
	HEADER_ACCESS_CONRTOL_ALLOW_METHODS = "access-control-allow-methods"
	HEADER_ACCESS_CONRTOL_EXPOSE_HEADERS = "access-control-expose-headers"
	HEADER_EPID_HEADERS = "epid"
	HEADER_VERSION_ID = "version-id"
	HEADER_COPY_SOURCE_VERSION_ID = "copy-source-version-id"
	HEADER_DELETE_MARKER = "delete-marker"
	HEADER_WEBSITE_REDIRECT_LOCATION = "website-redirect-location"
	HEADER_METADATA_DIRECTIVE = "metadata-directive"
	HEADER_EXPIRATION = "expiration"
	HEADER_EXPIRES_OBS = "x-obs-expires"
	HEADER_RESTORE = "restore"
	HEADER_OBJECT_TYPE = "object-type"
	HEADER_NEXT_APPEND_POSITION = "next-append-position"
	HEADER_STORAGE_CLASS2 = "storage-class"
	HEADER_CONTENT_LENGTH = "content-length"
	HEADER_CONTENT_TYPE = "content-type"
	HEADER_CONTENT_LANGUAGE = "content-language"
	HEADER_EXPIRES = "expires"
	HEADER_CACHE_CONTROL = "cache-control"
	HEADER_CONTENT_DISPOSITION = "content-disposition"
	HEADER_CONTENT_ENCODING = "content-encoding"
	HEADER_AZ_REDUNDANCY = "az-redundancy"
	HEADER_BUCKET_TYPE = "bucket-type"
	HEADER_BUCKET_REDUNDANCY = "bucket-redundancy"
	HEADER_FUSION_ALLOW_UPGRADE = "fusion-allow-upgrade"
	HEADER_FUSION_ALLOW_ALT = "fusion-allow-alternative"
	headerOefMarker = "oef-marker"

	HEADER_ETAG = "etag"
	HEADER_LASTMODIFIED = "last-modified"

	// Conditional-copy headers.
	HEADER_COPY_SOURCE_IF_MATCH = "copy-source-if-match"
	HEADER_COPY_SOURCE_IF_NONE_MATCH = "copy-source-if-none-match"
	HEADER_COPY_SOURCE_IF_MODIFIED_SINCE = "copy-source-if-modified-since"
	HEADER_COPY_SOURCE_IF_UNMODIFIED_SINCE = "copy-source-if-unmodified-since"

	HEADER_IF_MATCH = "If-Match"
	HEADER_IF_NONE_MATCH = "If-None-Match"
	HEADER_IF_MODIFIED_SINCE = "If-Modified-Since"
	HEADER_IF_UNMODIFIED_SINCE = "If-Unmodified-Since"

	// Server-side encryption headers (SSE-C and SSE-KMS).
	HEADER_SSEC_ENCRYPTION = "server-side-encryption-customer-algorithm"
	HEADER_SSEC_KEY = "server-side-encryption-customer-key"
	HEADER_SSEC_KEY_MD5 = "server-side-encryption-customer-key-MD5"

	HEADER_SSEKMS_ENCRYPTION = "server-side-encryption"
	HEADER_SSEKMS_KEY = "server-side-encryption-aws-kms-key-id"
	HEADER_SSEKMS_ENCRYPT_KEY_OBS = "server-side-encryption-kms-key-id"

	HEADER_SSEC_COPY_SOURCE_ENCRYPTION = "copy-source-server-side-encryption-customer-algorithm"
	HEADER_SSEC_COPY_SOURCE_KEY = "copy-source-server-side-encryption-customer-key"
	HEADER_SSEC_COPY_SOURCE_KEY_MD5 = "copy-source-server-side-encryption-customer-key-MD5"

	HEADER_SSEKMS_KEY_AMZ = "x-amz-server-side-encryption-aws-kms-key-id"

	HEADER_SSEKMS_KEY_OBS = "x-obs-server-side-encryption-kms-key-id"

	HEADER_SUCCESS_ACTION_REDIRECT = "success_action_redirect"

	headerFSFileInterface = "fs-file-interface"

	// Canonical (camel-case) HTTP header names as sent on the wire.
	HEADER_DATE_CAMEL = "Date"
	HEADER_HOST_CAMEL = "Host"
	HEADER_HOST = "host"
	HEADER_AUTH_CAMEL = "Authorization"
	HEADER_MD5_CAMEL = "Content-MD5"
	HEADER_SHA256_CAMEL = "Content-SHA256"
	HEADER_SHA256 = "content-sha256"
	HEADER_LOCATION_CAMEL = "Location"
	HEADER_CONTENT_LENGTH_CAMEL = "Content-Length"
	HEADER_CONTENT_TYPE_CAML = "Content-Type"
	HEADER_USER_AGENT_CAMEL = "User-Agent"
	HEADER_ORIGIN_CAMEL = "Origin"
	HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL = "Access-Control-Request-Headers"
	HEADER_CACHE_CONTROL_CAMEL = "Cache-Control"
	HEADER_CONTENT_DISPOSITION_CAMEL = "Content-Disposition"
	HEADER_CONTENT_ENCODING_CAMEL = "Content-Encoding"
	HEADER_CONTENT_LANGUAGE_CAMEL = "Content-Language"
	HEADER_EXPIRES_CAMEL = "Expires"
	HEADER_ACCEPT_ENCODING = "Accept-Encoding"

	// Query-string parameter names.
	PARAM_VERSION_ID = "versionId"
	PARAM_RESPONSE_CONTENT_TYPE = "response-content-type"
	PARAM_RESPONSE_CONTENT_LANGUAGE = "response-content-language"
	PARAM_RESPONSE_EXPIRES = "response-expires"
	PARAM_RESPONSE_CACHE_CONTROL = "response-cache-control"
	PARAM_RESPONSE_CONTENT_DISPOSITION = "response-content-disposition"
	PARAM_RESPONSE_CONTENT_ENCODING = "response-content-encoding"
	PARAM_IMAGE_PROCESS = "x-image-process"

	PARAM_ALGORITHM_AMZ_CAMEL = "X-Amz-Algorithm"
	PARAM_CREDENTIAL_AMZ_CAMEL = "X-Amz-Credential"
	PARAM_DATE_AMZ_CAMEL = "X-Amz-Date"
	PARAM_DATE_OBS_CAMEL = "X-Obs-Date"
	PARAM_EXPIRES_AMZ_CAMEL = "X-Amz-Expires"
	PARAM_SIGNEDHEADERS_AMZ_CAMEL = "X-Amz-SignedHeaders"
	PARAM_SIGNATURE_AMZ_CAMEL = "X-Amz-Signature"

	// Client tuning defaults (timeouts in seconds) and date/time layouts.
	DEFAULT_SIGNATURE = SignatureV2
	DEFAULT_REGION = "region"
	DEFAULT_CONNECT_TIMEOUT = 60
	DEFAULT_SOCKET_TIMEOUT = 60
	DEFAULT_HEADER_TIMEOUT = 60
	DEFAULT_IDLE_CONN_TIMEOUT = 30
	DEFAULT_MAX_RETRY_COUNT = 3
	DEFAULT_MAX_REDIRECT_COUNT = 3
	DEFAULT_MAX_CONN_PER_HOST = 1000
	UNSIGNED_PAYLOAD = "UNSIGNED-PAYLOAD"
	LONG_DATE_FORMAT = "20060102T150405Z"
	SHORT_DATE_FORMAT = "20060102"
	ISO8601_DATE_FORMAT = "2006-01-02T15:04:05Z"
	ISO8601_MIDNIGHT_DATE_FORMAT = "2006-01-02T00:00:00Z"
	RFC1123_FORMAT = "Mon, 02 Jan 2006 15:04:05 GMT"

	// Signing markers for the v2/OBS/v4 schemes.
	V4_SERVICE_NAME = "s3"
	V4_SERVICE_SUFFIX = "aws4_request"

	V2_HASH_PREFIX = "AWS"
	OBS_HASH_PREFIX = "OBS"

	V4_HASH_PREFIX = "AWS4-HMAC-SHA256"
	V4_HASH_PRE = "AWS4"

	DEFAULT_SSE_KMS_ENCRYPTION = "aws:kms"
	DEFAULT_SSE_KMS_ENCRYPTION_OBS = "kms"

	DEFAULT_SSE_C_ENCRYPTION = "AES256"

	// HTTP methods.
	HTTP_GET = "GET"
	HTTP_POST = "POST"
	HTTP_PUT = "PUT"
	HTTP_DELETE = "DELETE"
	HTTP_HEAD = "HEAD"
	HTTP_OPTIONS = "OPTIONS"

	REQUEST_PAYER = "request-payer"
	TRAFFIC_LIMIT = "traffic-limit"
	CALLBACK = "callback"
	MULTI_AZ = "3az"

	// Multipart-upload size limits, in bytes.
	MAX_PART_SIZE = 5 * 1024 * 1024 * 1024
	MIN_PART_SIZE = 100 * 1024
	DEFAULT_PART_SIZE = 9 * 1024 * 1024
	MAX_PART_NUM = 10000

	GET_OBJECT = "GetObject"
	PUT_OBJECT = "PutObject"
	PUT_FILE = "PutFile"
	APPEND_OBJECT = "AppendObject"
	MAX_CERT_XML_BODY_SIZE = 40 * 1024
	CERT_ID_SIZE = 16
	MAX_CERTIFICATE_NAME_LENGTH = 63
	MIN_CERTIFICATE_NAME_LENGTH = 3
	CERTIFICATE_FIELD_NAME = "CERTIFICATE ID SIZE"
	NAME_LENGTH = "Name Length"
	XML_SIZE = "XML SIZE"
)

var (
	// interestedHeaders are the standard headers folded into the v2/OBS
	// string-to-sign.
	interestedHeaders = []string{"content-md5", "content-type", "date"}

	// allowedRequestHTTPHeaderMetadataNames lists non-prefixed header names
	// that may be passed through on requests.
	allowedRequestHTTPHeaderMetadataNames = map[string]bool{
		"content-type": true,
		"content-md5": true,
		"content-sha256": true,
		"content-length": true,
		"content-language": true,
		"expires": true,
		"origin": true,
		"cache-control": true,
		"content-disposition": true,
		"content-encoding": true,
		"access-control-request-method": true,
		"access-control-request-headers": true,
		"x-default-storage-class": true,
		"location": true,
		"date": true,
		"etag": true,
		"range": true,
		"host": true,
		"if-modified-since": true,
		"if-unmodified-since": true,
		"if-match": true,
		"if-none-match": true,
		"last-modified": true,
		"content-range": true,
		"accept-encoding": true,
		"x-hic-info": true,
		"safe-area": true,
	}

	// allowedLogResponseHTTPHeaderNames lists response headers that are
	// safe to include in logs.
	allowedLogResponseHTTPHeaderNames = map[string]bool{
		"content-type": true,
		"etag": true,
		"connection": true,
		"content-length": true,
		"date": true,
		"server": true,
		"x-reserved-indicator": true,
	}

	// allowedResourceParameterNames lists sub-resource query parameters
	// that participate in the canonicalized URL for signing (see formatUrls).
	allowedResourceParameterNames = map[string]bool{
		"acl": true,
		"backtosource": true,
		"metadata": true,
		"policy": true,
		"torrent": true,
		"logging": true,
		"location": true,
		"storageinfo": true,
		"quota": true,
		"storageclass": true,
		"storagepolicy": true,
		"requestpayment": true,
		"versions": true,
		"versioning": true,
		"versionid": true,
		"uploads": true,
		"uploadid": true,
		"partnumber": true,
		"website": true,
		"notification": true,
		"lifecycle": true,
		"deletebucket": true,
		"delete": true,
		"cors": true,
		"restore": true,
		"encryption": true,
		"tagging": true,
		"append": true,
		"modify": true,
		"position": true,
		"replication": true,
		"response-content-type": true,
		"response-content-language": true,
		"response-expires": true,
		"response-cache-control": true,
		"response-content-disposition": true,
		"response-content-encoding": true,
		"x-image-process": true,
		"x-oss-process": true,
		"x-image-save-bucket": true,
		"x-image-save-object": true,
		"ignore-sign-in-query": true,
		"name": true,
		"rename": true,
		"customdomain": true,
		"mirrorbacktosource": true,
		"x-obs-accesslabel": true,
		"object-lock": true,
		"retention": true,
		"x-obs-security-token": true,
		"truncate": true,
		"length": true,
		"inventory": true,
		"directcoldaccess": true,
		"attname": true,
		"cdnnotifyconfiguration": true,
		"publicaccessblock": true,
		"bucketstatus": true,
		"policystatus": true,
	}

	// obsStorageClasses enumerates the storage classes recognized by OBS.
	obsStorageClasses = []string{
		string(StorageClassStandard),
		string(StorageClassWarm),
		string(StorageClassCold),
		string(StorageClassDeepArchive),
		string(StorageClassIntelligentTiering),
	}
)

+ 1313
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/convert.go View File

@@ -0,0 +1,1313 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"reflect"
"strconv"
"strings"
"time"
)

// cleanHeaderPrefix lower-cases response header names and strips the vendor
// prefix ("x-amz-" or "x-obs-") so callers see normalized keys; headers with
// no values are dropped. In non-OBS (S3 signature) mode a header ending in
// "x-obs-expires" is kept verbatim so it is not confused with the standard
// "expires" header after stripping.
func cleanHeaderPrefix(header http.Header, isObs bool) map[string][]string {
	responseHeaders := make(map[string][]string, len(header))
	for key, value := range header {
		if len(value) == 0 {
			continue
		}
		key = strings.ToLower(key)

		if !isObs && strings.HasSuffix(key, HEADER_EXPIRES_OBS) {
			responseHeaders[key] = value
			continue
		}
		// Strip each prefix by its own length. The previous code sliced by
		// len(HEADER_PREFIX) for both prefixes, which only worked because
		// "x-amz-" and "x-obs-" happen to be the same length.
		if strings.HasPrefix(key, HEADER_PREFIX) {
			key = key[len(HEADER_PREFIX):]
		} else if strings.HasPrefix(key, HEADER_PREFIX_OBS) {
			key = key[len(HEADER_PREFIX_OBS):]
		}
		responseHeaders[key] = value
	}
	return responseHeaders
}

// ParseStringToEventType converts a string (with or without the "s3:"
// prefix) to the matching EventType; unknown values map to "".
func ParseStringToEventType(value string) (ret EventType) {
	switch strings.TrimPrefix(value, "s3:") {
	case "ObjectCreated:*":
		return ObjectCreatedAll
	case "ObjectCreated:Put":
		return ObjectCreatedPut
	case "ObjectCreated:Post":
		return ObjectCreatedPost
	case "ObjectCreated:Copy":
		return ObjectCreatedCopy
	case "ObjectCreated:CompleteMultipartUpload":
		return ObjectCreatedCompleteMultipartUpload
	case "ObjectRemoved:*":
		return ObjectRemovedAll
	case "ObjectRemoved:Delete":
		return ObjectRemovedDelete
	case "ObjectRemoved:DeleteMarkerCreated":
		return ObjectRemovedDeleteMarkerCreated
	}
	return ""
}

// ParseStringToStorageClassType converts a string (including the legacy S3
// aliases STANDARD_IA/GLACIER) to the matching StorageClassType; unknown
// values map to "".
func ParseStringToStorageClassType(value string) (ret StorageClassType) {
	switch value {
	case string(StorageClassStandard):
		return StorageClassStandard
	case string(storageClassStandardIA), string(StorageClassWarm):
		return StorageClassWarm
	case string(storageClassGlacier), string(StorageClassCold):
		return StorageClassCold
	case string(StorageClassDeepArchive):
		return StorageClassDeepArchive
	case string(StorageClassIntelligentTiering):
		return StorageClassIntelligentTiering
	}
	return ""
}

// ParseStringToFSStatusType maps "Enabled"/"Disabled" to the corresponding
// FSStatusType; anything else maps to "".
func ParseStringToFSStatusType(value string) (ret FSStatusType) {
	if value == "Enabled" {
		return FSStatusEnabled
	}
	if value == "Disabled" {
		return FSStatusDisabled
	}
	return ""
}

// prepareGrantURI renders a group grantee URI element, expanding the
// well-known AWS group names to their full URIs.
func prepareGrantURI(grantUri GroupUriType) string {
	switch grantUri {
	case GroupAllUsers, GroupAuthenticatedUsers:
		return fmt.Sprintf("<URI>%s%s</URI>", "http://acs.amazonaws.com/groups/global/", grantUri)
	case GroupLogDelivery:
		return fmt.Sprintf("<URI>%s%s</URI>", "http://acs.amazonaws.com/groups/s3/", grantUri)
	}
	return fmt.Sprintf("<URI>%s</URI>", grantUri)
}

// convertGrantToXML serializes one ACL Grant into its XML fragment. The
// grantee type is inferred when only an ID (user) or only a URI (group) was
// supplied. In OBS mode only the AllUsers group is representable; any other
// group grantee yields an empty string.
func convertGrantToXML(grant Grant, isObs bool, isBucket bool) string {
	xml := make([]string, 0, 4)

	// Infer the grantee type from which identifier was provided.
	if grant.Grantee.ID != "" &&
		grant.Grantee.URI == "" &&
		grant.Grantee.Type == "" {
		grant.Grantee.Type = GranteeUser
	}
	if grant.Grantee.URI != "" &&
		grant.Grantee.ID == "" &&
		grant.Grantee.Type == "" {
		grant.Grantee.Type = GranteeGroup
	}

	if grant.Grantee.Type == GranteeUser {
		if isObs {
			xml = append(xml, "<Grant><Grantee>")
		} else {
			xml = append(xml, fmt.Sprintf("<Grant><Grantee xsi:type=\"%s\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">", grant.Grantee.Type))
		}
		if grant.Grantee.ID != "" {
			granteeID := XmlTranscoding(grant.Grantee.ID)
			xml = append(xml, fmt.Sprintf("<ID>%s</ID>", granteeID))
		}
		if !isObs && grant.Grantee.DisplayName != "" {
			granteeDisplayName := XmlTranscoding(grant.Grantee.DisplayName)
			xml = append(xml, fmt.Sprintf("<DisplayName>%s</DisplayName>", granteeDisplayName))
		}
		xml = append(xml, "</Grantee>")
	} else {
		if !isObs {
			xml = append(xml, fmt.Sprintf("<Grant><Grantee xsi:type=\"%s\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">", grant.Grantee.Type))
			xml = append(xml, prepareGrantURI(grant.Grantee.URI))
			xml = append(xml, "</Grantee>")
		} else if grant.Grantee.URI == GroupAllUsers {
			xml = append(xml, "<Grant><Grantee>")
			// Plain literal; fmt.Sprintf with no verbs was flagged by
			// staticcheck (S1039).
			xml = append(xml, "<Canned>Everyone</Canned>")
			xml = append(xml, "</Grantee>")
		} else {
			// Unsupported OBS group grantee: emit nothing.
			return strings.Join(xml, "")
		}
	}

	xml = append(xml, fmt.Sprintf("<Permission>%s</Permission>", grant.Permission))
	if isObs && isBucket {
		xml = append(xml, fmt.Sprintf("<Delivered>%t</Delivered>", grant.Delivered))
	}
	xml = append(xml, "</Grant>")
	return strings.Join(xml, "")
}

// hasLoggingTarget reports whether the logging status carries any target
// configuration (bucket, prefix, or grants). Simplified from the former
// if/return-true/return-false form (staticcheck S1008).
func hasLoggingTarget(input BucketLoggingStatus) bool {
	return input.TargetBucket != "" || input.TargetPrefix != "" || len(input.TargetGrants) > 0
}

// ConvertLoggingStatusToXml converts BucketLoggingStatus value to XML data
// and returns it, together with the body's Base64 MD5 when returnMd5 is set.
func ConvertLoggingStatusToXml(input BucketLoggingStatus, returnMd5 bool, isObs bool) (data string, md5 string) {
	var sb strings.Builder

	if isObs {
		sb.WriteString("<BucketLoggingStatus>")
	} else {
		sb.WriteString(`<BucketLoggingStatus xmlns="http://s3.amazonaws.com/doc/2006-03-01/">`)
	}
	if isObs && input.Agency != "" {
		sb.WriteString(fmt.Sprintf("<Agency>%s</Agency>", XmlTranscoding(input.Agency)))
	}
	if hasLoggingTarget(input) {
		sb.WriteString("<LoggingEnabled>")
		if input.TargetBucket != "" {
			sb.WriteString(fmt.Sprintf("<TargetBucket>%s</TargetBucket>", input.TargetBucket))
		}
		if input.TargetPrefix != "" {
			sb.WriteString(fmt.Sprintf("<TargetPrefix>%s</TargetPrefix>", XmlTranscoding(input.TargetPrefix)))
		}
		if len(input.TargetGrants) > 0 {
			sb.WriteString("<TargetGrants>")
			for _, grant := range input.TargetGrants {
				sb.WriteString(convertGrantToXML(grant, isObs, false))
			}
			sb.WriteString("</TargetGrants>")
		}
		sb.WriteString("</LoggingEnabled>")
	}
	sb.WriteString("</BucketLoggingStatus>")

	data = sb.String()
	if returnMd5 {
		md5 = Base64Md5([]byte(data))
	}
	return
}

// ConvertAclToXml converts AccessControlPolicy value to XML data and
// returns it, together with the body's Base64 MD5 when returnMd5 is set.
func ConvertAclToXml(input AccessControlPolicy, returnMd5 bool, isObs bool) (data string, md5 string) {
	var sb strings.Builder

	sb.WriteString(fmt.Sprintf("<AccessControlPolicy><Owner><ID>%s</ID>", XmlTranscoding(input.Owner.ID)))
	if !isObs && input.Owner.DisplayName != "" {
		sb.WriteString(fmt.Sprintf("<DisplayName>%s</DisplayName>", XmlTranscoding(input.Owner.DisplayName)))
	}
	if isObs && input.Delivered != "" {
		sb.WriteString(fmt.Sprintf("</Owner><Delivered>%s</Delivered><AccessControlList>", XmlTranscoding(input.Delivered)))
	} else {
		sb.WriteString("</Owner><AccessControlList>")
	}
	for _, grant := range input.Grants {
		sb.WriteString(convertGrantToXML(grant, isObs, false))
	}
	sb.WriteString("</AccessControlList></AccessControlPolicy>")

	data = sb.String()
	if returnMd5 {
		md5 = Base64Md5([]byte(data))
	}
	return
}

// convertBucketACLToXML serializes a bucket ACL. Grants are rendered with the
// bucket flag set (third argument of convertGrantToXML), unlike ConvertAclToXml.
func convertBucketACLToXML(input AccessControlPolicy, returnMd5 bool, isObs bool) (data string, md5 string) {
	parts := make([]string, 0, 4+len(input.Grants))
	parts = append(parts, fmt.Sprintf("<AccessControlPolicy><Owner><ID>%s</ID>", XmlTranscoding(input.Owner.ID)))
	// DisplayName is only emitted for the S3-compatible protocol.
	if !isObs && input.Owner.DisplayName != "" {
		parts = append(parts, fmt.Sprintf("<DisplayName>%s</DisplayName>", XmlTranscoding(input.Owner.DisplayName)))
	}

	parts = append(parts, "</Owner><AccessControlList>")

	for _, grant := range input.Grants {
		parts = append(parts, convertGrantToXML(grant, isObs, true))
	}
	parts = append(parts, "</AccessControlList></AccessControlPolicy>")
	data = strings.Join(parts, "")
	if returnMd5 {
		md5 = Base64Md5([]byte(data))
	}
	return
}

// convertConditionToXML renders a routing-rule Condition element, or ""
// when neither condition field is set.
func convertConditionToXML(condition Condition) string {
	var parts []string
	if condition.KeyPrefixEquals != "" {
		parts = append(parts, fmt.Sprintf("<KeyPrefixEquals>%s</KeyPrefixEquals>", XmlTranscoding(condition.KeyPrefixEquals)))
	}
	if condition.HttpErrorCodeReturnedEquals != "" {
		parts = append(parts, fmt.Sprintf("<HttpErrorCodeReturnedEquals>%s</HttpErrorCodeReturnedEquals>", condition.HttpErrorCodeReturnedEquals))
	}
	if len(parts) == 0 {
		return ""
	}
	return fmt.Sprintf("<Condition>%s</Condition>", strings.Join(parts, ""))
}

// prepareRoutingRule renders every routing rule of the website configuration
// as a concatenated sequence of <RoutingRule> elements.
func prepareRoutingRule(input BucketWebsiteConfiguration) string {
	parts := make([]string, 0, len(input.RoutingRules)*10)
	for _, rule := range input.RoutingRules {
		parts = append(parts, "<RoutingRule>", "<Redirect>")
		redirect := rule.Redirect
		if redirect.Protocol != "" {
			parts = append(parts, fmt.Sprintf("<Protocol>%s</Protocol>", redirect.Protocol))
		}
		if redirect.HostName != "" {
			parts = append(parts, fmt.Sprintf("<HostName>%s</HostName>", redirect.HostName))
		}
		if redirect.ReplaceKeyPrefixWith != "" {
			parts = append(parts, fmt.Sprintf("<ReplaceKeyPrefixWith>%s</ReplaceKeyPrefixWith>", XmlTranscoding(redirect.ReplaceKeyPrefixWith)))
		}
		if redirect.ReplaceKeyWith != "" {
			parts = append(parts, fmt.Sprintf("<ReplaceKeyWith>%s</ReplaceKeyWith>", XmlTranscoding(redirect.ReplaceKeyWith)))
		}
		if redirect.HttpRedirectCode != "" {
			parts = append(parts, fmt.Sprintf("<HttpRedirectCode>%s</HttpRedirectCode>", redirect.HttpRedirectCode))
		}
		parts = append(parts, "</Redirect>")

		// The optional Condition element follows the Redirect element.
		if condition := convertConditionToXML(rule.Condition); condition != "" {
			parts = append(parts, condition)
		}
		parts = append(parts, "</RoutingRule>")
	}
	return strings.Join(parts, "")
}

// ConvertWebsiteConfigurationToXml converts BucketWebsiteConfiguration value to XML data and returns it
func ConvertWebsiteConfigurationToXml(input BucketWebsiteConfiguration, returnMd5 bool) (data string, md5 string) {
	ruleCount := len(input.RoutingRules)
	parts := make([]string, 0, 6+ruleCount*10)
	parts = append(parts, "<WebsiteConfiguration>")

	// RedirectAllRequestsTo takes precedence; index/error documents and
	// routing rules are only emitted when it is absent.
	if input.RedirectAllRequestsTo.HostName != "" {
		parts = append(parts, fmt.Sprintf("<RedirectAllRequestsTo><HostName>%s</HostName>", input.RedirectAllRequestsTo.HostName))
		if input.RedirectAllRequestsTo.Protocol != "" {
			parts = append(parts, fmt.Sprintf("<Protocol>%s</Protocol>", input.RedirectAllRequestsTo.Protocol))
		}
		parts = append(parts, "</RedirectAllRequestsTo>")
	} else {
		if input.IndexDocument.Suffix != "" {
			parts = append(parts, fmt.Sprintf("<IndexDocument><Suffix>%s</Suffix></IndexDocument>", XmlTranscoding(input.IndexDocument.Suffix)))
		}
		if input.ErrorDocument.Key != "" {
			parts = append(parts, fmt.Sprintf("<ErrorDocument><Key>%s</Key></ErrorDocument>", XmlTranscoding(input.ErrorDocument.Key)))
		}
		if ruleCount > 0 {
			parts = append(parts, "<RoutingRules>", prepareRoutingRule(input), "</RoutingRules>")
		}
	}

	parts = append(parts, "</WebsiteConfiguration>")
	data = strings.Join(parts, "")
	if returnMd5 {
		md5 = Base64Md5([]byte(data))
	}
	return
}

// convertTransitionsToXML renders lifecycle transitions. A transition with
// neither a positive Days nor a non-zero Date is skipped. For the S3 protocol
// the OBS storage-class names are mapped to their S3 equivalents.
func convertTransitionsToXML(transitions []Transition, isObs bool) string {
	if len(transitions) == 0 {
		return ""
	}
	parts := make([]string, 0, len(transitions))
	for _, transition := range transitions {
		var when string
		if transition.Days > 0 {
			when = fmt.Sprintf("<Days>%d</Days>", transition.Days)
		} else if !transition.Date.IsZero() {
			when = fmt.Sprintf("<Date>%s</Date>", transition.Date.UTC().Format(ISO8601_MIDNIGHT_DATE_FORMAT))
		}
		if when == "" {
			continue
		}
		storageClass := string(transition.StorageClass)
		if !isObs {
			// Map OBS class names to S3 ones for the S3-compatible protocol.
			switch transition.StorageClass {
			case StorageClassWarm:
				storageClass = string(storageClassStandardIA)
			case StorageClassCold:
				storageClass = string(storageClassGlacier)
			}
		}
		parts = append(parts, fmt.Sprintf("<Transition>%s<StorageClass>%s</StorageClass></Transition>", when, storageClass))
	}
	return strings.Join(parts, "")
}

// convertLifeCycleFilterToXML marshals a lifecycle filter to XML. An empty
// filter (no prefix, no tags) or a marshalling failure yields "".
func convertLifeCycleFilterToXML(filter LifecycleFilter) string {
	if filter.Prefix == "" && len(filter.Tags) == 0 {
		return ""
	}
	if data, err := TransToXml(filter); err == nil {
		return string(data)
	}
	return ""
}

// convertExpirationToXML renders a lifecycle Expiration element, preferring
// Days, then Date, then ExpiredObjectDeleteMarker; "" when none is set.
func convertExpirationToXML(expiration Expiration) string {
	switch {
	case expiration.Days > 0:
		return fmt.Sprintf("<Expiration><Days>%d</Days></Expiration>", expiration.Days)
	case !expiration.Date.IsZero():
		return fmt.Sprintf("<Expiration><Date>%s</Date></Expiration>", expiration.Date.UTC().Format(ISO8601_MIDNIGHT_DATE_FORMAT))
	case expiration.ExpiredObjectDeleteMarker != "":
		return fmt.Sprintf("<Expiration><ExpiredObjectDeleteMarker>%s</ExpiredObjectDeleteMarker></Expiration>", expiration.ExpiredObjectDeleteMarker)
	default:
		return ""
	}
}

// convertNoncurrentVersionTransitionsToXML renders noncurrent-version
// transitions; entries without a positive NoncurrentDays are skipped, and for
// the S3 protocol OBS storage-class names are mapped to their S3 equivalents.
func convertNoncurrentVersionTransitionsToXML(noncurrentVersionTransitions []NoncurrentVersionTransition, isObs bool) string {
	if len(noncurrentVersionTransitions) == 0 {
		return ""
	}
	parts := make([]string, 0, len(noncurrentVersionTransitions))
	for _, transition := range noncurrentVersionTransitions {
		if transition.NoncurrentDays <= 0 {
			continue
		}
		storageClass := string(transition.StorageClass)
		if !isObs {
			switch storageClass {
			case string(StorageClassWarm):
				storageClass = string(storageClassStandardIA)
			case string(StorageClassCold):
				storageClass = string(storageClassGlacier)
			}
		}
		parts = append(parts, fmt.Sprintf("<NoncurrentVersionTransition><NoncurrentDays>%d</NoncurrentDays>"+
			"<StorageClass>%s</StorageClass></NoncurrentVersionTransition>",
			transition.NoncurrentDays, storageClass))
	}
	return strings.Join(parts, "")
}
// convertNoncurrentVersionExpirationToXML renders the noncurrent-version
// expiration element, or "" when NoncurrentDays is not positive.
func convertNoncurrentVersionExpirationToXML(noncurrentVersionExpiration NoncurrentVersionExpiration) string {
	if noncurrentVersionExpiration.NoncurrentDays <= 0 {
		return ""
	}
	return fmt.Sprintf("<NoncurrentVersionExpiration><NoncurrentDays>%d</NoncurrentDays></NoncurrentVersionExpiration>", noncurrentVersionExpiration.NoncurrentDays)
}

// convertAbortIncompleteMultipartUploadToXML renders the abort-incomplete
// multipart element, or "" when DaysAfterInitiation is not positive.
func convertAbortIncompleteMultipartUploadToXML(abortIncompleteMultipartUpload AbortIncompleteMultipartUpload) string {
	if abortIncompleteMultipartUpload.DaysAfterInitiation <= 0 {
		return ""
	}
	return fmt.Sprintf("<AbortIncompleteMultipartUpload><DaysAfterInitiation>%d</DaysAfterInitiation></AbortIncompleteMultipartUpload>", abortIncompleteMultipartUpload.DaysAfterInitiation)
}

// ConvertLifecycleConfigurationToXml converts BucketLifecycleConfiguration value to XML data and returns it
func ConvertLifecycleConfigurationToXml(input BucketLifecycleConfiguration, returnMd5, isObs, enableSha256 bool) (data string, md5OrSha256 string) {
	parts := make([]string, 0, 2+len(input.LifecycleRules)*9)
	parts = append(parts, "<LifecycleConfiguration>")
	for _, rule := range input.LifecycleRules {
		parts = append(parts, "<Rule>")
		if rule.ID != "" {
			parts = append(parts, fmt.Sprintf("<ID>%s</ID>", XmlTranscoding(rule.ID)))
		}
		prefix := XmlTranscoding(rule.Prefix)
		filter := convertLifeCycleFilterToXML(rule.Filter)
		// Emit <Prefix> when one is set, or as an empty placeholder when
		// neither a prefix nor a filter is present.
		// (Equivalent to the longer form: prefix != "" || (prefix == "" && filter == "").)
		if prefix != "" || filter == "" {
			parts = append(parts, fmt.Sprintf("<Prefix>%s</Prefix>", prefix))
		}
		if filter != "" {
			parts = append(parts, filter)
		}
		parts = append(parts, fmt.Sprintf("<Status>%s</Status>", rule.Status))
		// Optional sections, in the same fixed order as the original.
		for _, section := range []string{
			convertTransitionsToXML(rule.Transitions, isObs),
			convertExpirationToXML(rule.Expiration),
			convertNoncurrentVersionTransitionsToXML(rule.NoncurrentVersionTransitions, isObs),
			convertNoncurrentVersionExpirationToXML(rule.NoncurrentVersionExpiration),
			convertAbortIncompleteMultipartUploadToXML(rule.AbortIncompleteMultipartUpload),
		} {
			if section != "" {
				parts = append(parts, section)
			}
		}
		parts = append(parts, "</Rule>")
	}
	parts = append(parts, "</LifecycleConfiguration>")
	data = strings.Join(parts, "")
	if returnMd5 {
		md5OrSha256 = Base64Md5OrSha256([]byte(data), enableSha256)
	}
	return
}

// ConvertEncryptionConfigurationToXml converts BucketEncryptionConfiguration value to XML data and returns it
func ConvertEncryptionConfigurationToXml(input BucketEncryptionConfiguration, returnMd5 bool, isObs bool) (data string, md5 string) {
	parts := make([]string, 0, 5)
	parts = append(parts, "<ServerSideEncryptionConfiguration><Rule><ApplyServerSideEncryptionByDefault>")

	// SSEAlgorithm is emitted unconditionally; key id and project id only when set.
	parts = append(parts, fmt.Sprintf("<SSEAlgorithm>%s</SSEAlgorithm>", XmlTranscoding(input.SSEAlgorithm)))
	if input.KMSMasterKeyID != "" {
		parts = append(parts, fmt.Sprintf("<KMSMasterKeyID>%s</KMSMasterKeyID>", XmlTranscoding(input.KMSMasterKeyID)))
	}
	if input.ProjectID != "" {
		parts = append(parts, fmt.Sprintf("<ProjectID>%s</ProjectID>", XmlTranscoding(input.ProjectID)))
	}

	parts = append(parts, "</ApplyServerSideEncryptionByDefault></Rule></ServerSideEncryptionConfiguration>")
	data = strings.Join(parts, "")
	if returnMd5 {
		md5 = Base64Md5([]byte(data))
	}
	return
}

// converntFilterRulesToXML renders notification filter rules wrapped in the
// protocol-specific container element (S3Key for S3, Object for OBS).
func converntFilterRulesToXML(filterRules []FilterRule, isObs bool) string {
	if len(filterRules) == 0 {
		return ""
	}
	parts := make([]string, 0, len(filterRules)*4)
	for _, rule := range filterRules {
		parts = append(parts, "<FilterRule>")
		if rule.Name != "" {
			parts = append(parts, fmt.Sprintf("<Name>%s</Name>", XmlTranscoding(rule.Name)))
		}
		if rule.Value != "" {
			parts = append(parts, fmt.Sprintf("<Value>%s</Value>", XmlTranscoding(rule.Value)))
		}
		parts = append(parts, "</FilterRule>")
	}
	inner := strings.Join(parts, "")
	if isObs {
		return fmt.Sprintf("<Filter><Object>%s</Object></Filter>", inner)
	}
	return fmt.Sprintf("<Filter><S3Key>%s</S3Key></Filter>", inner)
}

// converntEventsToXML renders the event list; the S3 protocol prefixes each
// event name with "s3:".
func converntEventsToXML(events []EventType, isObs bool) string {
	if len(events) == 0 {
		return ""
	}
	prefix := ""
	if !isObs {
		prefix = "s3:"
	}
	parts := make([]string, 0, len(events))
	for _, event := range events {
		parts = append(parts, fmt.Sprintf("<Event>%s%s</Event>", prefix, event))
	}
	return strings.Join(parts, "")
}

// converntConfigureToXML serializes a single TopicConfiguration.
// xmlElem is the opening tag (e.g. "<TopicConfiguration>"); the matching
// closing tag is derived from it at the end of the function.
func converntConfigureToXML(topicConfiguration TopicConfiguration, xmlElem string, isObs bool) string {
	xml := make([]string, 0, 6)
	xml = append(xml, xmlElem)
	if topicConfiguration.ID != "" {
		topicConfigurationID := XmlTranscoding(topicConfiguration.ID)
		xml = append(xml, fmt.Sprintf("<Id>%s</Id>", topicConfigurationID))
	}
	// Topic is emitted unconditionally, even when empty.
	topicConfigurationTopic := XmlTranscoding(topicConfiguration.Topic)
	xml = append(xml, fmt.Sprintf("<Topic>%s</Topic>", topicConfigurationTopic))

	if ret := converntEventsToXML(topicConfiguration.Events, isObs); ret != "" {
		xml = append(xml, ret)
	}
	if ret := converntFilterRulesToXML(topicConfiguration.FilterRules, isObs); ret != "" {
		xml = append(xml, ret)
	}
	// Build the closing tag by inserting "/" after "<": "<Tag>" -> "</Tag>".
	tempElem := xmlElem[0:1] + "/" + xmlElem[1:]
	xml = append(xml, tempElem)
	return strings.Join(xml, "")
}

// ConventObsRestoreToXml converts RestoreObjectInput value to XML data and returns it
func ConventObsRestoreToXml(restoreObjectInput RestoreObjectInput) string {
	xml := make([]string, 0, 3)
	xml = append(xml, fmt.Sprintf("<RestoreRequest><Days>%d</Days>", restoreObjectInput.Days))
	// Only emit RestoreJob for non-"Bulk" tiers (presumably "Bulk" is the
	// service default — confirm against the OBS restore API).
	if restoreObjectInput.Tier != "Bulk" {
		xml = append(xml, fmt.Sprintf("<RestoreJob><Tier>%s</Tier></RestoreJob>", restoreObjectInput.Tier))
	}
	// Plain string literal instead of fmt.Sprintf with no arguments (vet/S1039).
	xml = append(xml, "</RestoreRequest>")
	return strings.Join(xml, "")
}

// ConvertNotificationToXml converts BucketNotification value to XML data and returns it
func ConvertNotificationToXml(input BucketNotification, returnMd5 bool, isObs bool) (data string, md5 string) {
	parts := make([]string, 0, 2+len(input.TopicConfigurations)*6)
	parts = append(parts, "<NotificationConfiguration>")
	for _, configuration := range input.TopicConfigurations {
		parts = append(parts, converntConfigureToXML(configuration, "<TopicConfiguration>", isObs))
	}
	parts = append(parts, "</NotificationConfiguration>")
	data = strings.Join(parts, "")
	if returnMd5 {
		md5 = Base64Md5([]byte(data))
	}
	return
}

// ConvertCompleteMultipartUploadInputToXml converts CompleteMultipartUploadInput value to XML data and returns it
func ConvertCompleteMultipartUploadInputToXml(input CompleteMultipartUploadInput, returnMd5 bool) (data string, md5 string) {
	parts := make([]string, 0, 2+len(input.Parts)*4)
	parts = append(parts, "<CompleteMultipartUpload>")
	for _, part := range input.Parts {
		parts = append(parts,
			"<Part>",
			fmt.Sprintf("<PartNumber>%d</PartNumber>", part.PartNumber),
			fmt.Sprintf("<ETag>%s</ETag>", part.ETag),
			"</Part>")
	}
	parts = append(parts, "</CompleteMultipartUpload>")
	data = strings.Join(parts, "")
	if returnMd5 {
		md5 = Base64Md5([]byte(data))
	}
	return
}

// convertDeleteObjectsToXML serializes a DeleteObjectsInput into the request
// XML document and returns it together with its base64 MD5 digest
// (the digest is always computed for this request).
func convertDeleteObjectsToXML(input DeleteObjectsInput) (data string, md5 string) {
	parts := make([]string, 0, 4+len(input.Objects)*4)
	parts = append(parts, "<Delete>")
	if input.Quiet {
		parts = append(parts, fmt.Sprintf("<Quiet>%t</Quiet>", input.Quiet))
	}
	if input.EncodingType != "" {
		parts = append(parts, fmt.Sprintf("<EncodingType>%s</EncodingType>", XmlTranscoding(input.EncodingType)))
	}
	for _, object := range input.Objects {
		parts = append(parts, "<Object>", fmt.Sprintf("<Key>%s</Key>", XmlTranscoding(object.Key)))
		if object.VersionId != "" {
			parts = append(parts, fmt.Sprintf("<VersionId>%s</VersionId>", object.VersionId))
		}
		parts = append(parts, "</Object>")
	}
	parts = append(parts, "</Delete>")
	data = strings.Join(parts, "")
	md5 = Base64Md5([]byte(data))
	return
}

// parseSseHeader inspects the response headers for server-side-encryption
// information and returns the matching ISseHeader implementation:
// SseCHeader when the SSE-C encryption header is present, SseKmsHeader when
// the SSE-KMS encryption header is present, nil otherwise.
func parseSseHeader(responseHeaders map[string][]string) (sseHeader ISseHeader) {
	if ret, ok := responseHeaders[HEADER_SSEC_ENCRYPTION]; ok {
		sseCHeader := SseCHeader{Encryption: ret[0]}
		if ret, ok = responseHeaders[HEADER_SSEC_KEY_MD5]; ok {
			sseCHeader.KeyMD5 = ret[0]
		}
		sseHeader = sseCHeader
	} else if ret, ok := responseHeaders[HEADER_SSEKMS_ENCRYPTION]; ok {
		sseKmsHeader := SseKmsHeader{Encryption: ret[0]}
		// The KMS key id may arrive under either of two header names;
		// the first match wins.
		if ret, ok = responseHeaders[HEADER_SSEKMS_KEY]; ok {
			sseKmsHeader.Key = ret[0]
		} else if ret, ok = responseHeaders[HEADER_SSEKMS_ENCRYPT_KEY_OBS]; ok {
			sseKmsHeader.Key = ret[0]
		}
		sseHeader = sseKmsHeader
	}
	return
}

// parseCorsHeader extracts the CORS-related values from the response headers.
func parseCorsHeader(output BaseModel) (AllowOrigin, AllowHeader, AllowMethod, ExposeHeader string, MaxAgeSeconds int) {
	headers := output.ResponseHeaders
	// The string-valued headers are independent assignments, so a table works.
	for header, target := range map[string]*string{
		HEADER_ACCESS_CONRTOL_ALLOW_ORIGIN:   &AllowOrigin,
		HEADER_ACCESS_CONRTOL_ALLOW_HEADERS:  &AllowHeader,
		HEADER_ACCESS_CONRTOL_ALLOW_METHODS:  &AllowMethod,
		HEADER_ACCESS_CONRTOL_EXPOSE_HEADERS: &ExposeHeader,
	} {
		if values, ok := headers[header]; ok {
			*target = values[0]
		}
	}
	if values, ok := headers[HEADER_ACCESS_CONRTOL_MAX_AGE]; ok {
		MaxAgeSeconds = StringToInt(values[0], 0)
	}
	return
}

// parseUnCommonHeader copies the less common object-metadata headers
// (version id, redirect location, expiration, restore status, object type,
// next append position) into the output when present.
func parseUnCommonHeader(output *GetObjectMetadataOutput) {
	assignments := []struct {
		header string
		target *string
	}{
		{HEADER_VERSION_ID, &output.VersionId},
		{HEADER_WEBSITE_REDIRECT_LOCATION, &output.WebsiteRedirectLocation},
		{HEADER_EXPIRATION, &output.Expiration},
		{HEADER_RESTORE, &output.Restore},
		{HEADER_OBJECT_TYPE, &output.ObjectType},
		{HEADER_NEXT_APPEND_POSITION, &output.NextAppendPosition},
	}
	for _, a := range assignments {
		if values, ok := output.ResponseHeaders[a.header]; ok {
			*a.target = values[0]
		}
	}
}

// parseStandardMetadataHeader copies the standard HTTP metadata headers
// (content type/encoding/disposition/language, cache control, expires)
// into the output when present.
func parseStandardMetadataHeader(output *GetObjectMetadataOutput) {
	assignments := []struct {
		header string
		target *string
	}{
		{HEADER_CONTENT_TYPE, &output.ContentType},
		{HEADER_CONTENT_ENCODING, &output.ContentEncoding},
		{HEADER_CACHE_CONTROL, &output.CacheControl},
		{HEADER_CONTENT_DISPOSITION, &output.ContentDisposition},
		{HEADER_CONTENT_LANGUAGE, &output.ContentLanguage},
		{HEADER_EXPIRES, &output.HttpExpires},
	}
	for _, a := range assignments {
		if values, ok := output.ResponseHeaders[a.header]; ok {
			*a.target = values[0]
		}
	}
}

// ParseGetObjectMetadataOutput sets GetObjectMetadataOutput field values with response headers
func ParseGetObjectMetadataOutput(output *GetObjectMetadataOutput) {
	output.AllowOrigin, output.AllowHeader, output.AllowMethod, output.ExposeHeader, output.MaxAgeSeconds = parseCorsHeader(output.BaseModel)
	parseUnCommonHeader(output)
	parseStandardMetadataHeader(output)
	if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS2]; ok {
		output.StorageClass = ParseStringToStorageClassType(ret[0])
	}
	if ret, ok := output.ResponseHeaders[HEADER_ETAG]; ok {
		output.ETag = ret[0]
	}

	output.SseHeader = parseSseHeader(output.ResponseHeaders)
	if ret, ok := output.ResponseHeaders[HEADER_LASTMODIFIED]; ok {
		// A malformed Last-Modified header is silently ignored.
		lastModified, err := time.Parse(time.RFC1123, ret[0])
		if err == nil {
			output.LastModified = lastModified
		}
	}
	if ret, ok := output.ResponseHeaders[HEADER_CONTENT_LENGTH]; ok {
		output.ContentLength = StringToInt64(ret[0], 0)
	}

	// Move user metadata (PREFIX_META-prefixed headers) into output.Metadata
	// and re-key the corresponding ResponseHeaders entries without the prefix.
	// Collect the keys first: the original inserted entries into the map while
	// ranging over it, and per the Go spec it is unspecified whether entries
	// created during iteration are visited.
	output.Metadata = make(map[string]string)
	metaKeys := make([]string, 0, len(output.ResponseHeaders))
	for key := range output.ResponseHeaders {
		if strings.HasPrefix(key, PREFIX_META) {
			metaKeys = append(metaKeys, key)
		}
	}
	for _, key := range metaKeys {
		value := output.ResponseHeaders[key]
		trimmed := key[len(PREFIX_META):]
		output.ResponseHeaders[trimmed] = value
		output.Metadata[trimmed] = value[0]
		delete(output.ResponseHeaders, key)
	}
}

// ParseCopyObjectOutput sets CopyObjectOutput field values with response headers
func ParseCopyObjectOutput(output *CopyObjectOutput) {
	headers := output.ResponseHeaders
	if versionID, ok := headers[HEADER_VERSION_ID]; ok {
		output.VersionId = versionID[0]
	}
	output.SseHeader = parseSseHeader(headers)
	if sourceVersionID, ok := headers[HEADER_COPY_SOURCE_VERSION_ID]; ok {
		output.CopySourceVersionId = sourceVersionID[0]
	}
}

// ParsePutObjectOutput sets PutObjectOutput field values with response headers
func ParsePutObjectOutput(output *PutObjectOutput) {
	headers := output.ResponseHeaders
	if versionID, ok := headers[HEADER_VERSION_ID]; ok {
		output.VersionId = versionID[0]
	}
	output.SseHeader = parseSseHeader(headers)
	if storageClass, ok := headers[HEADER_STORAGE_CLASS2]; ok {
		output.StorageClass = ParseStringToStorageClassType(storageClass[0])
	}
	if etag, ok := headers[HEADER_ETAG]; ok {
		output.ETag = etag[0]
	}
}

// ParseInitiateMultipartUploadOutput sets InitiateMultipartUploadOutput field values with response headers
func ParseInitiateMultipartUploadOutput(output *InitiateMultipartUploadOutput) {
	// Only the SSE-related headers are extracted for this response.
	output.SseHeader = parseSseHeader(output.ResponseHeaders)
}

// ParseUploadPartOutput sets UploadPartOutput field values with response headers
func ParseUploadPartOutput(output *UploadPartOutput) {
	output.SseHeader = parseSseHeader(output.ResponseHeaders)
	if etag, ok := output.ResponseHeaders[HEADER_ETAG]; ok {
		output.ETag = etag[0]
	}
}

// ParseCompleteMultipartUploadOutput sets CompleteMultipartUploadOutput field values with response headers
func ParseCompleteMultipartUploadOutput(output *CompleteMultipartUploadOutput) {
	output.SseHeader = parseSseHeader(output.ResponseHeaders)
	if versionID, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok {
		output.VersionId = versionID[0]
	}
}

// ParseCopyPartOutput sets CopyPartOutput field values with response headers
func ParseCopyPartOutput(output *CopyPartOutput) {
	// Only the SSE-related headers are extracted for this response.
	output.SseHeader = parseSseHeader(output.ResponseHeaders)
}

// ParseStringToAvailableZoneType converts string value to AvailableZoneType value and returns it
func ParseStringToAvailableZoneType(value string) (ret AvailableZoneType) {
	// "3az" is the only recognized value; anything else maps to the empty type.
	if value == "3az" {
		return AvailableZoneMultiAz
	}
	return ""
}

// ParseGetBucketMetadataOutput sets GetBucketMetadataOutput field values with response headers
func ParseGetBucketMetadataOutput(output *GetBucketMetadataOutput) {
	output.AllowOrigin, output.AllowHeader, output.AllowMethod, output.ExposeHeader, output.MaxAgeSeconds = parseCorsHeader(output.BaseModel)
	// Storage class may arrive under either of two header names; first match wins.
	if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS]; ok {
		output.StorageClass = ParseStringToStorageClassType(ret[0])
	} else if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS2]; ok {
		output.StorageClass = ParseStringToStorageClassType(ret[0])
	}
	if ret, ok := output.ResponseHeaders[HEADER_VERSION_OBS]; ok {
		output.Version = ret[0]
	}
	// Location: prefer the bucket-region header, fall back to the OBS-specific one.
	if ret, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok {
		output.Location = ret[0]
	} else if ret, ok := output.ResponseHeaders[HEADER_BUCKET_LOCATION_OBS]; ok {
		output.Location = ret[0]
	}
	if ret, ok := output.ResponseHeaders[HEADER_EPID_HEADERS]; ok {
		output.Epid = ret[0]
	}
	if ret, ok := output.ResponseHeaders[HEADER_AZ_REDUNDANCY]; ok {
		output.AZRedundancy = ParseStringToAvailableZoneType(ret[0])
	}
	if ret, ok := output.ResponseHeaders[HEADER_BUCKET_REDUNDANCY]; ok {
		output.BucketRedundancy = parseStringToBucketRedundancy(ret[0])
	}
	// FSStatus defaults to disabled when the file-interface header is absent.
	if ret, ok := output.ResponseHeaders[headerFSFileInterface]; ok {
		output.FSStatus = parseStringToFSStatusType(ret[0])
	} else {
		output.FSStatus = FSStatusDisabled
	}
}

// parseContentHeader copies the Content-* response headers into the output
// when present.
func parseContentHeader(output *SetObjectMetadataOutput) {
	assignments := []struct {
		header string
		target *string
	}{
		{HEADER_CONTENT_DISPOSITION, &output.ContentDisposition},
		{HEADER_CONTENT_ENCODING, &output.ContentEncoding},
		{HEADER_CONTENT_LANGUAGE, &output.ContentLanguage},
		{HEADER_CONTENT_TYPE, &output.ContentType},
	}
	for _, a := range assignments {
		if values, ok := output.ResponseHeaders[a.header]; ok {
			*a.target = values[0]
		}
	}
}

// ParseSetObjectMetadataOutput sets SetObjectMetadataOutput field values with response headers
func ParseSetObjectMetadataOutput(output *SetObjectMetadataOutput) {
	// Storage class may arrive under either of two header names; first match wins.
	if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS]; ok {
		output.StorageClass = ParseStringToStorageClassType(ret[0])
	} else if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS2]; ok {
		output.StorageClass = ParseStringToStorageClassType(ret[0])
	}
	if ret, ok := output.ResponseHeaders[HEADER_METADATA_DIRECTIVE]; ok {
		output.MetadataDirective = MetadataDirectiveType(ret[0])
	}
	if ret, ok := output.ResponseHeaders[HEADER_CACHE_CONTROL]; ok {
		output.CacheControl = ret[0]
	}
	parseContentHeader(output)
	if ret, ok := output.ResponseHeaders[HEADER_EXPIRES]; ok {
		output.Expires = ret[0]
	}
	if ret, ok := output.ResponseHeaders[HEADER_WEBSITE_REDIRECT_LOCATION]; ok {
		output.WebsiteRedirectLocation = ret[0]
	}

	// Move user metadata (PREFIX_META-prefixed headers) into output.Metadata
	// and re-key the corresponding ResponseHeaders entries without the prefix.
	// Collect the keys first: the original inserted entries into the map while
	// ranging over it, and per the Go spec it is unspecified whether entries
	// created during iteration are visited.
	output.Metadata = make(map[string]string)
	metaKeys := make([]string, 0, len(output.ResponseHeaders))
	for key := range output.ResponseHeaders {
		if strings.HasPrefix(key, PREFIX_META) {
			metaKeys = append(metaKeys, key)
		}
	}
	for _, key := range metaKeys {
		value := output.ResponseHeaders[key]
		trimmed := key[len(PREFIX_META):]
		output.ResponseHeaders[trimmed] = value
		output.Metadata[trimmed] = value[0]
		delete(output.ResponseHeaders, key)
	}
}

// ParseDeleteObjectOutput sets DeleteObjectOutput field values with response headers
func ParseDeleteObjectOutput(output *DeleteObjectOutput) {
	headers := output.ResponseHeaders
	if versionID, ok := headers[HEADER_VERSION_ID]; ok {
		output.VersionId = versionID[0]
	}
	if deleteMarker, ok := headers[HEADER_DELETE_MARKER]; ok {
		output.DeleteMarker = deleteMarker[0] == "true"
	}
}

// ParseGetObjectOutput sets GetObjectOutput field values with response headers
func ParseGetObjectOutput(output *GetObjectOutput) {
	// Populate the embedded metadata fields first; the lookups below then set
	// the GetObjectOutput-specific fields from the same headers.
	ParseGetObjectMetadataOutput(&output.GetObjectMetadataOutput)
	if ret, ok := output.ResponseHeaders[HEADER_DELETE_MARKER]; ok {
		output.DeleteMarker = ret[0] == "true"
	}
	if ret, ok := output.ResponseHeaders[HEADER_CONTENT_TYPE]; ok {
		output.ContentType = ret[0]
	}
	if ret, ok := output.ResponseHeaders[HEADER_CACHE_CONTROL]; ok {
		output.CacheControl = ret[0]
	}
	if ret, ok := output.ResponseHeaders[HEADER_CONTENT_DISPOSITION]; ok {
		output.ContentDisposition = ret[0]
	}
	if ret, ok := output.ResponseHeaders[HEADER_CONTENT_ENCODING]; ok {
		output.ContentEncoding = ret[0]
	}
	if ret, ok := output.ResponseHeaders[HEADER_CONTENT_LANGUAGE]; ok {
		output.ContentLanguage = ret[0]
	}
	if ret, ok := output.ResponseHeaders[HEADER_EXPIRES]; ok {
		// Both fields mirror the same Expires header value.
		output.Expires = ret[0]
		output.HttpExpires = ret[0]
	}
}

// ConvertRequestToIoReaderV2 converts req to XML data and returns it as an
// io.Reader together with its base64 MD5 (or SHA-256 when enableSha256) digest.
func ConvertRequestToIoReaderV2(req interface{}, enableSha256 bool) (io.Reader, string, error) {
	data, err := TransToXml(req)
	if err != nil {
		return nil, "", err
	}
	// Compute the digest only after a successful serialization; the original
	// computed it before checking err, doing needless work on failure.
	return bytes.NewReader(data), Base64Md5OrSha256(data, enableSha256), nil
}

// ConvertRequestToIoReader converts req to XML data
func ConvertRequestToIoReader(req interface{}) (io.Reader, error) {
	body, err := TransToXml(req)
	if err != nil {
		return nil, err
	}
	return bytes.NewReader(body), nil
}

// parseResponseBodyOutput copies the raw response body into the first field
// of baseModel whose struct tag is exactly `json:"body"`.
// NOTE(review): the comparison is against the complete tag string, so a field
// tagged e.g. `json:"body,omitempty"` or with additional tags would not
// match — confirm this is intended.
func parseResponseBodyOutput(s reflect.Type, baseModel IBaseModel, body []byte) {
	for i := 0; i < s.NumField(); i++ {
		if s.Field(i).Tag == "json:\"body\"" {
			reflect.ValueOf(baseModel).Elem().FieldByName(s.Field(i).Name).SetString(string(body))
			break
		}
	}
}

// ParseCallbackResponseToBaseModel gets response from Callback Service
func ParseCallbackResponseToBaseModel(resp *http.Response, baseModel IBaseModel, isObs bool) error {
	baseModel.setStatusCode(resp.StatusCode)
	headers := cleanHeaderPrefix(resp.Header, isObs)
	baseModel.setResponseHeaders(headers)
	if requestID, ok := headers[HEADER_REQUEST_ID]; ok {
		baseModel.setRequestID(requestID[0])
	}
	// The model must support streaming the callback body; otherwise fail.
	if readCloser, ok := baseModel.(ICallbackReadCloser); ok {
		readCloser.setCallbackReadCloser(resp.Body)
		return nil
	}
	return errors.New("Failed to set CallbackBody with resp's body.")
}

// ParseResponseToBaseModel gets response from OBS
func ParseResponseToBaseModel(resp *http.Response, baseModel IBaseModel, xmlResult bool, isObs bool) (err error) {
	readCloser, ok := baseModel.(IReadCloser)
	if !ok {
		// Non-streaming model: read and decode the whole body here and make
		// sure it is always closed.
		defer func() {
			errMsg := resp.Body.Close()
			if errMsg != nil {
				doLog(LEVEL_WARN, "Failed to close response body")
			}
		}()
		var body []byte
		body, err = ioutil.ReadAll(resp.Body)
		if err == nil && len(body) > 0 {

			name := reflect.TypeOf(baseModel).Elem().Name()
			if xmlResult {
				err = ParseXml(body, baseModel)
			} else {
				s := reflect.TypeOf(baseModel).Elem()
				// These two outputs carry the raw body in a `json:"body"`
				// tagged field instead of being JSON-decoded.
				if name == "GetBucketPolicyOutput" || name == "GetBucketMirrorBackToSourceOutput" {
					parseResponseBodyOutput(s, baseModel, body)
				} else {
					err = parseJSON(body, baseModel)
				}
			}
			if err != nil {
				doLog(LEVEL_ERROR, "body: %s", body)
				// A CopyObjectOutput that fails to unmarshal may actually be
				// an error document from the service; re-parse it as ObsError.
				if _, ok := baseModel.(*ObsError); !ok && name == "CopyObjectOutput" {
					doLog(LEVEL_ERROR, "Unmarshal error: %v, try parse response to ObsError", err)
					err = ParseResponseToObsError(resp, isObs)
				} else {
					doLog(LEVEL_ERROR, "Unmarshal error: %v", err)
				}
			}
		}
	} else {
		// Streaming model: hand ownership of the body to the caller.
		readCloser.setReadCloser(resp.Body)
	}

	// Status code, headers, and request id are populated in all cases.
	baseModel.setStatusCode(resp.StatusCode)
	responseHeaders := cleanHeaderPrefix(resp.Header, isObs)
	baseModel.setResponseHeaders(responseHeaders)
	if values, ok := responseHeaders[HEADER_REQUEST_ID]; ok {
		baseModel.setRequestID(values[0])
	}
	return
}

// ParseResponseToObsError gets obsError from OBS
func ParseResponseToObsError(resp *http.Response, isObs bool) error {
	// A JSON content type switches the body parser from XML to JSON.
	isJson := false
	if contentType, ok := resp.Header[HEADER_CONTENT_TYPE_CAML]; ok {
		isJson = contentType[0] == mimeTypes["json"]
	}
	obsError := ObsError{}
	if respError := ParseResponseToBaseModel(resp, &obsError, !isJson, isObs); respError != nil {
		doLog(LEVEL_WARN, "Parse response to BaseModel with error: %v", respError)
	}
	obsError.Status = resp.Status
	// Header values override whatever the body parse produced.
	headers := cleanHeaderPrefix(resp.Header, isObs)
	if values, ok := headers[HEADER_ERROR_MESSAGE]; ok {
		obsError.Message = values[0]
	}
	if values, ok := headers[HEADER_ERROR_CODE]; ok {
		obsError.Code = values[0]
	}
	if values, ok := headers[HEADER_ERROR_INDICATOR]; ok {
		obsError.Indicator = values[0]
	}
	return obsError
}

// convertFetchPolicyToJSON converts SetBucketFetchPolicyInput into json format
func convertFetchPolicyToJSON(input SetBucketFetchPolicyInput) (data string, err error) {
	// The policy is wrapped under a top-level "fetch" key.
	payload := map[string]SetBucketFetchPolicyInput{"fetch": input}
	// Renamed the result variable: the original called it "json", shadowing
	// the encoding/json package after the Marshal call.
	encoded, err := json.Marshal(payload)
	if err != nil {
		return "", err
	}
	return string(encoded), nil
}

// convertFetchJobToJSON converts SetBucketFetchJobInput into json format
func convertFetchJobToJSON(input SetBucketFetchJobInput) (data string, err error) {
	// Normalize the object headers: keys are lower-cased, empty values are
	// dropped, and non-OBS headers get the OBS metadata prefix.
	objectHeaders := make(map[string]string, len(input.ObjectHeaders))
	for key, value := range input.ObjectHeaders {
		if value == "" {
			continue
		}
		_key := strings.ToLower(key)
		// NOTE(review): the prefix test uses the original-case key while the
		// stored key is lower-cased, so a mixed-case "X-Obs-" style prefix
		// would not be detected — confirm whether _key should be tested instead.
		if !strings.HasPrefix(key, HEADER_PREFIX_OBS) {
			_key = HEADER_PREFIX_META_OBS + _key
		}
		objectHeaders[_key] = value
	}
	input.ObjectHeaders = objectHeaders
	// Renamed the result variable: the original called it "json", shadowing
	// the encoding/json package after the Marshal call.
	encoded, err := json.Marshal(input)
	if err != nil {
		return "", err
	}
	return string(encoded), nil
}

// parseStringToFSStatusType maps the file-interface header value to the
// corresponding FSStatusType; unrecognized values yield the empty status.
func parseStringToFSStatusType(value string) FSStatusType {
	if value == "Enabled" {
		return FSStatusEnabled
	}
	if value == "Disabled" {
		return FSStatusDisabled
	}
	return ""
}

// parseStringToBucketRedundancy maps the bucket-redundancy header value to
// the corresponding BucketRedundancyType; unrecognized values yield "".
func parseStringToBucketRedundancy(value string) BucketRedundancyType {
	if value == "FUSION" {
		return BucketRedundancyFusion
	}
	if value == "CLASSIC" {
		return BucketRedundancyClassic
	}
	return ""
}

// decodeListObjectsOutput URL-unescapes every encoded field of a
// ListObjectsOutput in place; it stops at the first unescape error,
// leaving later fields untouched.
func decodeListObjectsOutput(output *ListObjectsOutput) (err error) {
	// unescape decodes *s in place and reports success; a failure is
	// captured in the named return err.
	unescape := func(s *string) bool {
		*s, err = url.QueryUnescape(*s)
		return err == nil
	}
	if !unescape(&output.Delimiter) || !unescape(&output.Marker) ||
		!unescape(&output.NextMarker) || !unescape(&output.Prefix) {
		return
	}
	for i := range output.CommonPrefixes {
		if !unescape(&output.CommonPrefixes[i]) {
			return
		}
	}
	for i := range output.Contents {
		if !unescape(&output.Contents[i].Key) {
			return
		}
	}
	return
}

// decodeListPosixObjectsOutput URL-decodes the percent-encoded fields of a
// ListPosixObjectsOutput in place, stopping at the first decode failure.
func decodeListPosixObjectsOutput(output *ListPosixObjectsOutput) (err error) {
    for _, field := range []*string{&output.Delimiter, &output.Marker, &output.NextMarker, &output.Prefix} {
        if *field, err = url.QueryUnescape(*field); err != nil {
            return
        }
    }
    for i := range output.CommonPrefixes {
        if output.CommonPrefixes[i].Prefix, err = url.QueryUnescape(output.CommonPrefixes[i].Prefix); err != nil {
            return
        }
    }
    for i := range output.Contents {
        if output.Contents[i].Key, err = url.QueryUnescape(output.Contents[i].Key); err != nil {
            return
        }
    }
    return
}

// decodeListVersionsOutput URL-decodes the percent-encoded fields of a
// ListVersionsOutput in place, stopping at the first decode failure.
func decodeListVersionsOutput(output *ListVersionsOutput) (err error) {
    for _, field := range []*string{&output.Delimiter, &output.KeyMarker, &output.NextKeyMarker, &output.Prefix} {
        if *field, err = url.QueryUnescape(*field); err != nil {
            return
        }
    }
    for i := range output.Versions {
        if output.Versions[i].Key, err = url.QueryUnescape(output.Versions[i].Key); err != nil {
            return
        }
    }
    for i := range output.DeleteMarkers {
        if output.DeleteMarkers[i].Key, err = url.QueryUnescape(output.DeleteMarkers[i].Key); err != nil {
            return
        }
    }
    for i := range output.CommonPrefixes {
        if output.CommonPrefixes[i], err = url.QueryUnescape(output.CommonPrefixes[i]); err != nil {
            return
        }
    }
    return
}

// decodeDeleteObjectsOutput URL-decodes the object keys in both the deleted
// and the failed entry lists, stopping at the first decode failure.
func decodeDeleteObjectsOutput(output *DeleteObjectsOutput) (err error) {
    for i := range output.Deleteds {
        if output.Deleteds[i].Key, err = url.QueryUnescape(output.Deleteds[i].Key); err != nil {
            return
        }
    }
    for i := range output.Errors {
        if output.Errors[i].Key, err = url.QueryUnescape(output.Errors[i].Key); err != nil {
            return
        }
    }
    return
}

// decodeListMultipartUploadsOutput URL-decodes the percent-encoded fields of a
// ListMultipartUploadsOutput in place, stopping at the first decode failure.
func decodeListMultipartUploadsOutput(output *ListMultipartUploadsOutput) (err error) {
    for _, field := range []*string{&output.Delimiter, &output.Prefix, &output.KeyMarker, &output.NextKeyMarker} {
        if *field, err = url.QueryUnescape(*field); err != nil {
            return
        }
    }
    for i := range output.CommonPrefixes {
        if output.CommonPrefixes[i], err = url.QueryUnescape(output.CommonPrefixes[i]); err != nil {
            return
        }
    }
    for i := range output.Uploads {
        if output.Uploads[i].Key, err = url.QueryUnescape(output.Uploads[i].Key); err != nil {
            return
        }
    }
    return
}

// decodeListPartsOutput URL-decodes the object key of a ListPartsOutput.
func decodeListPartsOutput(output *ListPartsOutput) (err error) {
    decoded, decodeErr := url.QueryUnescape(output.Key)
    output.Key = decoded
    return decodeErr
}

// decodeInitiateMultipartUploadOutput URL-decodes the object key of an
// InitiateMultipartUploadOutput.
func decodeInitiateMultipartUploadOutput(output *InitiateMultipartUploadOutput) (err error) {
    decoded, decodeErr := url.QueryUnescape(output.Key)
    output.Key = decoded
    return decodeErr
}

// decodeCompleteMultipartUploadOutput URL-decodes the object key of a
// CompleteMultipartUploadOutput.
func decodeCompleteMultipartUploadOutput(output *CompleteMultipartUploadOutput) (err error) {
    decoded, decodeErr := url.QueryUnescape(output.Key)
    output.Key = decoded
    return decodeErr
}

// ParseAppendObjectOutput sets AppendObjectOutput field values with response headers.
// It extracts the version id, SSE headers, ETag and the next append position.
func ParseAppendObjectOutput(output *AppendObjectOutput) (err error) {
    headers := output.ResponseHeaders
    if v, ok := headers[HEADER_VERSION_ID]; ok {
        output.VersionId = v[0]
    }
    output.SseHeader = parseSseHeader(headers)
    if v, ok := headers[HEADER_ETAG]; ok {
        output.ETag = v[0]
    }
    if v, ok := headers[HEADER_NEXT_APPEND_POSITION]; ok {
        if output.NextAppendPosition, err = strconv.ParseInt(v[0], 10, 64); err != nil {
            err = fmt.Errorf("failed to parse next append position with error [%v]", err)
        }
    }
    return
}

// ParseModifyObjectOutput sets ModifyObjectOutput field values with response headers.
func ParseModifyObjectOutput(output *ModifyObjectOutput) {
    if etag, present := output.ResponseHeaders[HEADER_ETAG]; present {
        output.ETag = etag[0]
    }
}

// ParseGetBucketFSStatusOutput fills a GetBucketFSStatusOutput from response
// headers: common bucket metadata first, then the file-interface status flag.
func ParseGetBucketFSStatusOutput(output *GetBucketFSStatusOutput) {
    ParseGetBucketMetadataOutput(&output.GetBucketMetadataOutput)
    if status, present := output.ResponseHeaders[HEADER_FS_FILE_INTERFACE_OBS]; present {
        output.FSStatus = ParseStringToFSStatusType(status[0])
    }
}

// ParseGetAttributeOutput fills a GetAttributeOutput from response headers.
// Mode defaults to -1 when the mode header is absent or unparsable.
func ParseGetAttributeOutput(output *GetAttributeOutput) {
    ParseGetObjectMetadataOutput(&output.GetObjectMetadataOutput)
    output.Mode = -1
    if mode, present := output.ResponseHeaders[HEADER_MODE]; present {
        output.Mode = StringToInt(mode[0], -1)
    }
}

// ParseNewFolderOutput sets NewFolderOutput field values with response headers.
// A folder is created via a put-object request, so the embedded PutObjectOutput
// is parsed directly.
func ParseNewFolderOutput(output *NewFolderOutput) {
    ParsePutObjectOutput(&output.PutObjectOutput)
}

+ 36
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/error.go View File

@@ -0,0 +1,36 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"encoding/xml"
"fmt"
)

// ObsError defines error response from OBS.
type ObsError struct {
    BaseModel
    // Status is the HTTP status line of the failed response.
    Status  string
    XMLName xml.Name `xml:"Error"`
    // Code and Message come from the XML/JSON error body returned by OBS.
    Code    string `xml:"Code" json:"code"`
    Message string `xml:"Message" json:"message"`
    // Resource is the bucket/object the failed request addressed.
    Resource string `xml:"Resource"`
    HostId   string `xml:"HostId"`
    // Indicator carries the server-side error indicator header, when present.
    Indicator string
}

// Error implements the error interface, summarizing the OBS failure
// (status, code, message, request id, indicator) in one line.
func (err ObsError) Error() string {
    return fmt.Sprintf("obs: service returned error: Status=%s, Code=%s, Message=%s, RequestId=%s, Indicator=%s.",
        err.Status, err.Code, err.Message, err.RequestId, err.Indicator)
}

+ 103
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/extension.go View File

@@ -0,0 +1,103 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"bytes"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"strconv"
"strings"
)

// extensionOptions is the marker type accepted by the per-request extension
// parameters; concrete options are the function types below.
type extensionOptions interface{}

// extensionHeaders mutates the outgoing request headers; isObs selects the
// OBS header dialect over the AWS-compatible one.
type extensionHeaders func(headers map[string][]string, isObs bool) error

// extensionProgressListener supplies the progress listener for a transfer.
type extensionProgressListener func() ProgressListener

// WithProgress wraps progressListener as an extension option so transfer
// progress events are delivered to it.
func WithProgress(progressListener ProgressListener) extensionProgressListener {
    listener := progressListener
    return func() ProgressListener { return listener }
}

// setHeaderPrefix builds an extensionHeaders callback that sets key to value
// through setHeaders; blank values are rejected with an error.
func setHeaderPrefix(key string, value string) extensionHeaders {
    return func(headers map[string][]string, isObs bool) error {
        if strings.TrimSpace(value) != "" {
            setHeaders(headers, key, []string{value}, isObs)
            return nil
        }
        return fmt.Errorf("set header %s with empty value", key)
    }
}

// WithReqPaymentHeader sets header for requester-pays, declaring that the
// requester (not the bucket owner) is billed for the request.
func WithReqPaymentHeader(requester PayerType) extensionHeaders {
    return setHeaderPrefix(REQUEST_PAYER, string(requester))
}

// WithTrafficLimitHeader sets the single-connection bandwidth limit header
// (value in bits per second, per the OBS traffic-limit convention — confirm
// against the service documentation).
func WithTrafficLimitHeader(trafficLimit int64) extensionHeaders {
    return setHeaderPrefix(TRAFFIC_LIMIT, strconv.FormatInt(trafficLimit, 10))
}

// WithCallbackHeader sets the upload-callback header to the given callback
// definition string.
func WithCallbackHeader(callback string) extensionHeaders {
    // callback is already a string; the previous string(callback) conversion
    // was a no-op and has been removed.
    return setHeaderPrefix(CALLBACK, callback)
}

// WithCustomHeader sets an arbitrary header verbatim (no prefixing, no
// normalization); blank values are rejected with an error.
func WithCustomHeader(key string, value string) extensionHeaders {
    return func(headers map[string][]string, isObs bool) error {
        if strings.TrimSpace(value) != "" {
            headers[key] = []string{value}
            return nil
        }
        return fmt.Errorf("set header %s with empty value", key)
    }
}

// PreprocessCallbackInputToSHA256 serializes callbackInput to JSON (without
// HTML escaping), base64-encodes the JSON, and returns the hex-encoded SHA256
// digest of that base64 string.
func PreprocessCallbackInputToSHA256(callbackInput *CallbackInput) (output string, err error) {
    if callbackInput == nil {
        return "", fmt.Errorf("the parameter can not be nil")
    }
    if callbackInput.CallbackUrl == "" {
        return "", fmt.Errorf("the parameter [CallbackUrl] can not be empty")
    }
    if callbackInput.CallbackBody == "" {
        return "", fmt.Errorf("the parameter [CallbackBody] can not be empty")
    }

    var callbackBuffer bytes.Buffer
    encoder := json.NewEncoder(&callbackBuffer)
    // Disable HTML escaping so the payload matches encodings produced by
    // other tooling.
    encoder.SetEscapeHTML(false)
    if err = encoder.Encode(callbackInput); err != nil {
        return "", err
    }
    callbackVal := base64.StdEncoding.EncodeToString(removeEndNewlineCharacter(&callbackBuffer))
    digest := sha256.Sum256([]byte(callbackVal))
    return hex.EncodeToString(digest[:]), nil
}

// Encode函数会默认在json结尾增加换行符,导致base64转码结果与其他方式不一致,需要去掉末尾的换行符
func removeEndNewlineCharacter(callbackBuffer *bytes.Buffer) []byte {
return callbackBuffer.Bytes()[:callbackBuffer.Len()-1]
}

+ 689
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/http.go View File

@@ -0,0 +1,689 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
)

// prepareHeaders normalizes outgoing headers: recognized standard headers and
// keys already carrying a service prefix pass through with their original
// case; any other key is treated as user metadata — dropped unless meta is
// true, otherwise lower-cased and given the dialect-appropriate meta prefix.
func prepareHeaders(headers map[string][]string, meta bool, isObs bool) map[string][]string {
    result := make(map[string][]string, len(headers))
    for rawKey, value := range headers {
        rawKey = strings.TrimSpace(rawKey)
        if rawKey == "" {
            continue
        }
        lowerKey := strings.ToLower(rawKey)
        _, allowed := allowedRequestHTTPHeaderMetadataNames[lowerKey]
        if allowed || strings.HasPrefix(rawKey, HEADER_PREFIX) || strings.HasPrefix(rawKey, HEADER_PREFIX_OBS) {
            result[rawKey] = value
            continue
        }
        if !meta {
            continue
        }
        if isObs {
            result[HEADER_PREFIX_META_OBS+lowerKey] = value
        } else {
            result[HEADER_PREFIX_META+lowerKey] = value
        }
    }
    return result
}

// checkParamsWithBucketName reports whether the bucket name is invalid:
// blank while the client is not configured with a cname endpoint.
func (obsClient ObsClient) checkParamsWithBucketName(bucketName string) bool {
    if strings.TrimSpace(bucketName) != "" {
        return false
    }
    return !obsClient.conf.cname
}

// checkParamsWithObjectKey reports whether the object key is blank.
func (obsClient ObsClient) checkParamsWithObjectKey(objectKey string) bool {
    return len(strings.TrimSpace(objectKey)) == 0
}

// doActionWithoutBucket performs a service-level request (no bucket, no key),
// expecting an XML response and allowing retries.
func (obsClient ObsClient) doActionWithoutBucket(action, method string, input ISerializable, output IBaseModel, extensions []extensionOptions) error {
    return obsClient.doAction(action, method, "", "", input, output, true, true, extensions, nil)
}

// doActionWithBucketV2 performs a bucket-level request whose response is NOT
// parsed as XML (xmlResult=false); retries are allowed.
func (obsClient ObsClient) doActionWithBucketV2(action, method, bucketName string, input ISerializable, output IBaseModel, extensions []extensionOptions) error {
    if obsClient.checkParamsWithBucketName(bucketName) {
        return errors.New("Bucket is empty")
    }
    return obsClient.doAction(action, method, bucketName, "", input, output, false, true, extensions, nil)
}

// doActionWithBucket performs a bucket-level request expecting an XML
// response; retries are allowed.
func (obsClient ObsClient) doActionWithBucket(action, method, bucketName string, input ISerializable, output IBaseModel, extensions []extensionOptions) error {
    if obsClient.checkParamsWithBucketName(bucketName) {
        return errors.New("Bucket is empty")
    }
    return obsClient.doAction(action, method, bucketName, "", input, output, true, true, extensions, nil)
}

// doActionWithBucketAndKey performs an object-level request expecting an XML
// response; retries are allowed. Bucket and key must be non-blank.
func (obsClient ObsClient) doActionWithBucketAndKey(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, extensions []extensionOptions) error {
    if obsClient.checkParamsWithBucketName(bucketName) {
        return errors.New("Bucket is empty")
    }
    if obsClient.checkParamsWithObjectKey(objectKey) {
        return errors.New("Key is empty")
    }
    return obsClient.doAction(action, method, bucketName, objectKey, input, output, true, true, extensions, nil)
}

// doActionWithBucketAndKeyWithProgress is doActionWithBucketAndKey plus a
// progress listener that receives transfer events.
func (obsClient ObsClient) doActionWithBucketAndKeyWithProgress(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, extensions []extensionOptions, listener ProgressListener) error {
    if obsClient.checkParamsWithBucketName(bucketName) {
        return errors.New("Bucket is empty")
    }
    if obsClient.checkParamsWithObjectKey(objectKey) {
        return errors.New("Key is empty")
    }
    return obsClient.doAction(action, method, bucketName, objectKey, input, output, true, true, extensions, listener)
}

// doActionWithBucketAndKeyV2 performs an object-level request whose response
// is NOT parsed as XML (xmlResult=false); retries are allowed.
func (obsClient ObsClient) doActionWithBucketAndKeyV2(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, extensions []extensionOptions) error {
    if obsClient.checkParamsWithBucketName(bucketName) {
        return errors.New("Bucket is empty")
    }
    if obsClient.checkParamsWithObjectKey(objectKey) {
        return errors.New("Key is empty")
    }
    return obsClient.doAction(action, method, bucketName, objectKey, input, output, false, true, extensions, nil)
}

// doActionWithBucketAndKeyUnRepeatable performs an object-level request that
// must not be retried (repeatable=false), e.g. because its body reader cannot
// be rewound.
func (obsClient ObsClient) doActionWithBucketAndKeyUnRepeatable(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, extensions []extensionOptions) error {
    if obsClient.checkParamsWithBucketName(bucketName) {
        return errors.New("Bucket is empty")
    }
    if obsClient.checkParamsWithObjectKey(objectKey) {
        return errors.New("Key is empty")
    }
    return obsClient.doAction(action, method, bucketName, objectKey, input, output, true, false, extensions, nil)
}

// doActionWithBucketAndKeyUnRepeatableWithProgress is the unrepeatable variant
// with a progress listener attached.
func (obsClient ObsClient) doActionWithBucketAndKeyUnRepeatableWithProgress(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, extensions []extensionOptions, listener ProgressListener) error {
    if obsClient.checkParamsWithBucketName(bucketName) {
        return errors.New("Bucket is empty")
    }
    if obsClient.checkParamsWithObjectKey(objectKey) {
        return errors.New("Key is empty")
    }
    return obsClient.doAction(action, method, bucketName, objectKey, input, output, true, false, extensions, listener)
}

// doAction is the common request pipeline: it serializes the input, applies
// header extensions, issues the HTTP request and hands the response to the
// output model. xmlResult selects XML parsing of the body; repeatable allows
// retries; listener (may be nil) receives progress events.
func (obsClient ObsClient) doAction(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, xmlResult bool, repeatable bool, extensions []extensionOptions, listener ProgressListener) error {

    var resp *http.Response
    var respError error
    doLog(LEVEL_INFO, "Enter method %s...", action)
    start := GetCurrentTimestamp()
    isObs := obsClient.conf.signature == SignatureObs

    // input.trans produces query params, headers and the request body.
    params, headers, data, err := input.trans(isObs)
    if err != nil {
        return err
    }

    if params == nil {
        params = make(map[string]string)
    }

    if headers == nil {
        headers = make(map[string][]string)
    }

    // Header extensions are best-effort: a failing extension is logged and
    // the request proceeds without it.
    for _, extension := range extensions {
        if extensionHeader, ok := extension.(extensionHeaders); ok {
            if _err := extensionHeader(headers, isObs); _err != nil {
                doLog(LEVEL_INFO, fmt.Sprintf("set header with error: %v", _err))
            }
        } else {
            doLog(LEVEL_INFO, "Unsupported extensionOptions")
        }
    }

    resp, respError = obsClient.doHTTPRequest(method, bucketName, objectKey, params, headers, data, repeatable, listener)

    if respError == nil && output != nil {
        respError = HandleHttpResponse(action, headers, output, resp, xmlResult, isObs)
    } else {
        doLog(LEVEL_WARN, "Do http request with error: %v", respError)
    }

    if isDebugLogEnabled() {
        doLog(LEVEL_DEBUG, "End method %s, obsclient cost %d ms", action, (GetCurrentTimestamp() - start))
    }

    return respError
}

// doHTTPRequest normalizes the headers (metadata prefixing disabled here) and
// delegates to doHTTP.
func (obsClient ObsClient) doHTTPRequest(method, bucketName, objectKey string, params map[string]string,
    headers map[string][]string, data interface{}, repeatable bool, listener ProgressListener) (*http.Response, error) {
    return obsClient.doHTTP(method, bucketName, objectKey, params, prepareHeaders(headers, false, obsClient.conf.signature == SignatureObs), data, repeatable, listener)
}

// prepareAgentHeader returns the client-configured user agent, falling back
// to the SDK default when none is configured.
func prepareAgentHeader(clientUserAgent string) string {
    if clientUserAgent == "" {
        return USER_AGENT
    }
    return clientUserAgent
}

// getSignedURLResponse post-processes a signed-URL request: transport errors
// are returned as-is, HTTP >= 300 is converted to an ObsError, and successful
// bodies are parsed into output (XML when xmlResult is true).
func (obsClient ObsClient) getSignedURLResponse(action string, output IBaseModel, xmlResult bool, resp *http.Response, err error, start int64) (respError error) {
    var msg interface{}
    isObs := obsClient.conf.signature == SignatureObs
    if err != nil {
        respError = err
        resp = nil
    } else {
        if logConf.level <= LEVEL_DEBUG {
            doLog(LEVEL_DEBUG, "Response headers: %s", logResponseHeader(resp.Header))
        }
        if resp.StatusCode >= 300 {
            // Error status: surface the parsed service error and log the status line.
            respError = ParseResponseToObsError(resp, isObs)
            msg = resp.Status
            resp = nil
        } else {
            if output != nil {
                respError = ParseResponseToBaseModel(resp, output, xmlResult, isObs)
            }
            if respError != nil {
                doLog(LEVEL_WARN, "Parse response to BaseModel with error: %v", respError)
            }
        }
    }

    if msg != nil {
        doLog(LEVEL_ERROR, "Failed to send request with reason:%v", msg)
    }

    if isDebugLogEnabled() {
        doLog(LEVEL_DEBUG, "End method %s, obsclient cost %d ms", action, (GetCurrentTimestamp() - start))
    }
    return
}

// doHTTPWithSignedURL issues a request against a pre-signed URL, masking any
// security token in the logged URL and promoting Host/Content-Length headers
// onto the request object before sending.
func (obsClient ObsClient) doHTTPWithSignedURL(action, method string, signedURL string, actualSignedRequestHeaders http.Header, data io.Reader, output IBaseModel, xmlResult bool) (respError error) {
    req, err := http.NewRequest(method, signedURL, data)
    if err != nil {
        return err
    }
    if obsClient.conf.ctx != nil {
        req = req.WithContext(obsClient.conf.ctx)
    }
    var resp *http.Response

    // Scan the query string for a security token so it can be masked in logs.
    // NOTE(review): the slice offset uses len(HEADER_STS_TOKEN_AMZ) for both
    // token dialects; this is only correct because both header constants have
    // the same length — confirm if either constant changes.
    var isSecurityToken bool
    var securityToken string
    var query []string
    parmas := strings.Split(signedURL, "?")
    if len(parmas) > 1 {
        query = strings.Split(parmas[1], "&")
        for _, value := range query {
            if strings.HasPrefix(value, HEADER_STS_TOKEN_AMZ+"=") || strings.HasPrefix(value, HEADER_STS_TOKEN_OBS+"=") {
                if value[len(HEADER_STS_TOKEN_AMZ)+1:] != "" {
                    securityToken = value[len(HEADER_STS_TOKEN_AMZ)+1:]
                    isSecurityToken = true
                }
            }
        }
    }
    logSignedURL := signedURL
    if isSecurityToken {
        logSignedURL = strings.Replace(logSignedURL, securityToken, "******", -1)
    }
    doLog(LEVEL_INFO, "Do %s with signedUrl %s...", action, logSignedURL)

    // Host and Content-Length must live on the request struct, not in the
    // header map, for net/http to honor them.
    req.Header = actualSignedRequestHeaders
    if value, ok := req.Header[HEADER_HOST_CAMEL]; ok {
        req.Host = value[0]
        delete(req.Header, HEADER_HOST_CAMEL)
    } else if value, ok := req.Header[HEADER_HOST]; ok {
        req.Host = value[0]
        delete(req.Header, HEADER_HOST)
    }

    if value, ok := req.Header[HEADER_CONTENT_LENGTH_CAMEL]; ok {
        req.ContentLength = StringToInt64(value[0], -1)
        delete(req.Header, HEADER_CONTENT_LENGTH_CAMEL)
    } else if value, ok := req.Header[HEADER_CONTENT_LENGTH]; ok {
        req.ContentLength = StringToInt64(value[0], -1)
        delete(req.Header, HEADER_CONTENT_LENGTH)
    }

    userAgent := prepareAgentHeader(obsClient.conf.userAgent)
    req.Header[HEADER_USER_AGENT_CAMEL] = []string{userAgent}
    start := GetCurrentTimestamp()
    resp, err = obsClient.httpClient.Do(req)
    if isInfoLogEnabled() {
        doLog(LEVEL_INFO, "Do http request cost %d ms", (GetCurrentTimestamp() - start))
    }

    respError = obsClient.getSignedURLResponse(action, output, xmlResult, resp, err, start)

    return
}

// prepareData converts the request payload into an io.Reader. String and
// byte-slice payloads also get the Content-Length header set; any other
// non-reader type is rejected. A nil payload yields a nil reader.
func prepareData(headers map[string][]string, data interface{}) (io.Reader, error) {
    if data == nil {
        return nil, nil
    }
    switch payload := data.(type) {
    case string:
        doLog(LEVEL_DEBUG, "Do http request with string")
        headers[HEADER_CONTENT_LENGTH_CAMEL] = []string{IntToString(len(payload))}
        return strings.NewReader(payload), nil
    case []byte:
        doLog(LEVEL_DEBUG, "Do http request with byte array")
        headers[HEADER_CONTENT_LENGTH_CAMEL] = []string{IntToString(len(payload))}
        return bytes.NewReader(payload), nil
    case io.Reader:
        return payload, nil
    default:
        doLog(LEVEL_WARN, "Data is not a valid io.Reader")
        return nil, errors.New("Data is not a valid io.Reader")
    }
}

// getRequest builds the signed *http.Request for an attempt. When following a
// redirect (redirectURL non-empty) the request is re-signed against the
// redirect host unless redirectFlag marks a 302 GET, in which case the
// redirect URL is used as-is.
func (obsClient ObsClient) getRequest(redirectURL, requestURL string, redirectFlag bool, _data io.Reader, method,
    bucketName, objectKey string, params map[string]string, headers map[string][]string) (*http.Request, error) {
    if redirectURL != "" {
        if !redirectFlag {
            parsedRedirectURL, err := url.Parse(redirectURL)
            if err != nil {
                return nil, err
            }
            // Re-sign against the redirect host, then carry over the signed
            // query string if the redirect URL has none of its own.
            requestURL, err = obsClient.doAuth(method, bucketName, objectKey, params, headers, parsedRedirectURL.Host)
            if err != nil {
                return nil, err
            }
            if parsedRequestURL, err := url.Parse(requestURL); err != nil {
                return nil, err
            } else if parsedRequestURL.RawQuery != "" && parsedRedirectURL.RawQuery == "" {
                redirectURL += "?" + parsedRequestURL.RawQuery
            }
        }
        requestURL = redirectURL
    } else {
        var err error
        requestURL, err = obsClient.doAuth(method, bucketName, objectKey, params, headers, "")
        if err != nil {
            return nil, err
        }
    }

    req, err := http.NewRequest(method, requestURL, _data)
    // Check the construction error BEFORE touching req: calling WithContext
    // on a nil request would panic. (The original attached the context first.)
    if err != nil {
        return nil, err
    }
    if obsClient.conf.ctx != nil {
        req = req.WithContext(obsClient.conf.ctx)
    }
    doLog(LEVEL_DEBUG, "Do request with url [%s] and method [%s]", requestURL, method)
    return req, nil
}

// logHeaders debug-logs the request headers with the Authorization header and
// any security token temporarily masked, then restores both afterwards.
func logHeaders(headers map[string][]string, signature SignatureType) {
    if isDebugLogEnabled() {
        auth := headers[HEADER_AUTH_CAMEL]
        delete(headers, HEADER_AUTH_CAMEL)

        var isSecurityToken bool
        var securityToken []string
        if securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_AMZ]; isSecurityToken {
            headers[HEADER_STS_TOKEN_AMZ] = []string{"******"}
        } else if securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_OBS]; isSecurityToken {
            headers[HEADER_STS_TOKEN_OBS] = []string{"******"}
        }
        if logConf.level <= LEVEL_DEBUG {
            doLog(LEVEL_DEBUG, "Request headers: %s", logRequestHeader(headers))
        }
        headers[HEADER_AUTH_CAMEL] = auth
        // NOTE(review): restoration keys off the signature type, not the key
        // the token was found under — if an AMZ-keyed token is present while
        // signature is Obs, it is restored under the OBS key. Confirm intended.
        if isSecurityToken {
            if signature == SignatureObs {
                headers[HEADER_STS_TOKEN_OBS] = securityToken
            } else {
                headers[HEADER_STS_TOKEN_AMZ] = securityToken
            }
        }
    }
}

// prepareReq copies the prepared headers onto req, promoting Host and
// Content-Length to the request struct fields, sets the user agent, and
// returns the request to be remembered as lastRequest for the next retry.
func prepareReq(headers map[string][]string, req, lastRequest *http.Request, clientUserAgent string) *http.Request {
    for key, value := range headers {
        if key == HEADER_HOST_CAMEL {
            req.Host = value[0]
            delete(headers, key)
        } else if key == HEADER_CONTENT_LENGTH_CAMEL {
            req.ContentLength = StringToInt64(value[0], -1)
            delete(headers, key)
        } else {
            req.Header[key] = value
        }
    }

    // NOTE(review): lastRequest is overwritten with req before the nil check
    // below, so the check is always true and Host/ContentLength are assigned
    // from req itself — the incoming lastRequest is never consulted. Looks
    // unintended; confirm against upstream before changing.
    lastRequest = req

    userAgent := prepareAgentHeader(clientUserAgent)
    req.Header[HEADER_USER_AGENT_CAMEL] = []string{userAgent}

    if lastRequest != nil {
        req.Host = lastRequest.Host
        req.ContentLength = lastRequest.ContentLength
    }
    return lastRequest
}

// canNotRetry reports whether a retry is pointless: the caller marked the
// request unrepeatable, or the response is a client error (4xx) or 304.
func canNotRetry(repeatable bool, statusCode int) bool {
    clientError := statusCode >= 400 && statusCode < 500
    return !repeatable || clientError || statusCode == 304
}

// isRedirectErr reports whether a redirect should be followed: a Location
// value is present and the redirect budget is not yet exhausted.
func isRedirectErr(location string, redirectCount, maxRedirectCount int) bool {
    return location != "" && redirectCount < maxRedirectCount
}

// setRedirectFlag reports whether the redirect may be followed without
// re-signing: only a 302 response to a GET qualifies.
func setRedirectFlag(statusCode int, method string) (redirectFlag bool) {
    return statusCode == 302 && method == HTTP_GET
}

// prepareRetry resets state between attempts: closes the previous response,
// refreshes date headers, drops the stale Authorization header (it will be
// re-signed), and rewinds the request body so it can be re-sent. File-backed
// bodies are reopened from their recorded mark.
func prepareRetry(resp *http.Response, headers map[string][]string, _data io.Reader, msg interface{}) (io.Reader, *http.Response, error) {
    if resp != nil {
        _err := resp.Body.Close()
        checkAndLogErr(_err, LEVEL_WARN, "Failed to close resp body")
        resp = nil
    }

    // Refresh whichever date header the request carries so the new signature
    // is computed over the current time.
    if _, ok := headers[HEADER_DATE_CAMEL]; ok {
        headers[HEADER_DATE_CAMEL] = []string{FormatUtcToRfc1123(time.Now().UTC())}
    }

    if _, ok := headers[HEADER_DATE_AMZ]; ok {
        headers[HEADER_DATE_AMZ] = []string{FormatUtcToRfc1123(time.Now().UTC())}
    }

    if _, ok := headers[HEADER_AUTH_CAMEL]; ok {
        delete(headers, HEADER_AUTH_CAMEL)
    }
    doLog(LEVEL_WARN, "Failed to send request with reason:%v, will try again", msg)
    // Rewind the body according to its concrete type.
    if r, ok := _data.(*strings.Reader); ok {
        _, err := r.Seek(0, 0)
        if err != nil {
            return nil, nil, err
        }
    } else if r, ok := _data.(*bytes.Reader); ok {
        _, err := r.Seek(0, 0)
        if err != nil {
            return nil, nil, err
        }
    } else if r, ok := _data.(*fileReaderWrapper); ok {
        // Reopen the file and seek back to the recorded mark.
        fd, err := os.Open(r.filePath)
        if err != nil {
            return nil, nil, err
        }
        fileReaderWrapper := &fileReaderWrapper{filePath: r.filePath}
        fileReaderWrapper.mark = r.mark
        fileReaderWrapper.reader = fd
        fileReaderWrapper.totalCount = r.totalCount
        _data = fileReaderWrapper
        _, err = fd.Seek(r.mark, 0)
        if err != nil {
            errMsg := fd.Close()
            checkAndLogErr(errMsg, LEVEL_WARN, "Failed to close with reason: %v", errMsg)
            return nil, nil, err
        }
    } else if r, ok := _data.(*readerWrapper); ok {
        _, err := r.seek(0, 0)
        if err != nil {
            return nil, nil, err
        }
        r.readedCount = 0
    }
    return _data, resp, nil
}

// handleBody handles request body: it derives Content-Length (from the header
// when present, otherwise from the reader), wraps the body in a progress
// TeeReader, and installs it on the request as a ReadCloser.
func handleBody(req *http.Request, body io.Reader, listener ProgressListener, tracker *readerTracker) {
    reader := body
    if ret, ok := req.Header[HEADER_CONTENT_LENGTH_CAMEL]; !ok {
        readerLen, err := GetReaderLen(reader)
        if err == nil {
            req.ContentLength = readerLen
        }
        if req.ContentLength > 0 {
            req.Header.Set(HEADER_CONTENT_LENGTH_CAMEL, strconv.FormatInt(req.ContentLength, 10))
        }
    } else {
        req.ContentLength = StringToInt64(ret[0], 0)
    }

    if reader != nil {
        reader = TeeReader(reader, req.ContentLength, listener, tracker)
    }

    // HTTP body must be a ReadCloser; wrap plain readers.
    rc, ok := reader.(io.ReadCloser)
    if !ok && reader != nil {
        rc = ioutil.NopCloser(reader)
    }

    req.Body = rc
}

// doHTTP is the retry/redirect engine. It signs and sends the request up to
// maxRetryCount+1 times with jittered backoff, follows up to maxRedirectCount
// redirects (re-signing unless the redirect is a 302 GET), rewinds the body
// between attempts, and publishes progress events to listener.
func (obsClient ObsClient) doHTTP(method, bucketName, objectKey string, params map[string]string,
    headers map[string][]string, data interface{}, repeatable bool, listener ProgressListener) (resp *http.Response, respError error) {
    // NOTE(review): this blanket recover silently swallows any panic in the
    // request path, returning zero values — intentional in the vendored SDK,
    // but it hides programming errors.
    defer func() {
        _ = recover()
    }()
    bucketName = strings.TrimSpace(bucketName)

    method = strings.ToUpper(method)

    var redirectURL string
    var requestURL string
    maxRetryCount := obsClient.conf.maxRetryCount
    maxRedirectCount := obsClient.conf.maxRedirectCount

    _data, _err := prepareData(headers, data)
    if _err != nil {
        return nil, _err
    }

    var lastRequest *http.Request
    redirectFlag := false

    tracker := &readerTracker{completedBytes: 0}

    for i, redirectCount := 0, 0; i <= maxRetryCount; i++ {
        req, err := obsClient.getRequest(redirectURL, requestURL, redirectFlag, _data,
            method, bucketName, objectKey, params, headers)
        if err != nil {
            return nil, err
        }

        handleBody(req, _data, listener, tracker)

        lastRequest = prepareReq(headers, req, lastRequest, obsClient.conf.userAgent)

        logHeaders(lastRequest.Header, obsClient.conf.signature)

        // Transfer started
        event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength)
        publishProgress(listener, event)

        start := GetCurrentTimestamp()
        isObs := obsClient.conf.signature == SignatureObs
        resp, err = obsClient.httpClient.Do(req)
        doLog(LEVEL_INFO, "Do http request cost %d ms", (GetCurrentTimestamp() - start))

        var msg interface{}
        if err != nil {
            // Transport-level failure: retry unless the request is unrepeatable.
            msg = err
            respError = err
            resp = nil
            if !repeatable {
                break
            }
        } else {
            if logConf.level <= LEVEL_DEBUG {
                doLog(LEVEL_DEBUG, "resp.StatusCode [%d] resp.Status [%s] Response headers: [%s]", resp.StatusCode, resp.Status, logResponseHeader(resp.Header))
            }
            if resp.StatusCode < 300 {
                // Success: report completion and stop retrying.
                event := newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength)
                publishProgress(listener, event)
                respError = nil
                break
            } else if canNotRetry(repeatable, resp.StatusCode) {
                event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength)
                publishProgress(listener, event)

                respError = ParseResponseToObsError(resp, isObs)
                resp = nil
                break
            } else if resp.StatusCode >= 300 && resp.StatusCode < 400 {
                location := resp.Header.Get(HEADER_LOCATION_CAMEL)
                if isRedirectErr(location, redirectCount, maxRedirectCount) {
                    redirectURL = location
                    doLog(LEVEL_WARN, "Redirect request to %s", redirectURL)
                    msg = resp.Status
                    // A redirect consumes a redirect slot, not a retry slot.
                    maxRetryCount++
                    redirectCount++
                    redirectFlag = setRedirectFlag(resp.StatusCode, method)
                } else {
                    respError = ParseResponseToObsError(resp, isObs)
                    resp = nil
                    break
                }
            } else {
                msg = resp.Status
            }
        }
        if i != maxRetryCount {
            _data, resp, err = prepareRetry(resp, headers, _data, msg)
            if err != nil {
                return nil, err
            }
            if r, ok := _data.(*fileReaderWrapper); ok {
                if _fd, _ok := r.reader.(*os.File); _ok {
                    // NOTE(review): defer inside the loop — files stay open
                    // until doHTTP returns; bounded by the retry count.
                    defer func() {
                        errMsg := _fd.Close()
                        checkAndLogErr(errMsg, LEVEL_WARN, "Failed to close with reason: %v", errMsg)
                    }()
                }
            }
            // Jittered linear backoff before the next attempt.
            time.Sleep(time.Duration(float64(i+2) * rand.Float64() * float64(time.Second)))
        } else {
            doLog(LEVEL_ERROR, "Failed to send request with reason:%v", msg)
            if resp != nil {
                respError = ParseResponseToObsError(resp, isObs)
                resp = nil
            }
            event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength)
            publishProgress(listener, event)
        }
    }
    return
}

// connDelegate wraps a net.Conn to refresh read/write deadlines around every
// I/O call: socketTimeout bounds each operation, finalTimeout is re-armed
// after it completes.
type connDelegate struct {
    conn          net.Conn
    socketTimeout time.Duration
    finalTimeout  time.Duration
}

// getConnDelegate wraps conn with per-operation and final deadlines, both
// given in seconds.
func getConnDelegate(conn net.Conn, socketTimeout int, finalTimeout int) *connDelegate {
    delegate := &connDelegate{conn: conn}
    delegate.socketTimeout = time.Duration(socketTimeout) * time.Second
    delegate.finalTimeout = time.Duration(finalTimeout) * time.Second
    return delegate
}

// Read arms the per-operation read deadline, performs the read, then re-arms
// the longer final deadline. Deadline-set failures are logged and ignored.
func (delegate *connDelegate) Read(b []byte) (n int, err error) {
    setReadDeadlineErr := delegate.SetReadDeadline(time.Now().Add(delegate.socketTimeout))
    flag := isDebugLogEnabled()

    if setReadDeadlineErr != nil && flag {
        doLog(LEVEL_DEBUG, "Failed to set read deadline with reason: %v, but it's ok", setReadDeadlineErr)
    }

    n, err = delegate.conn.Read(b)
    setReadDeadlineErr = delegate.SetReadDeadline(time.Now().Add(delegate.finalTimeout))
    if setReadDeadlineErr != nil && flag {
        doLog(LEVEL_DEBUG, "Failed to set read deadline with reason: %v, but it's ok", setReadDeadlineErr)
    }
    return n, err
}

// Write arms the per-operation write deadline, performs the write, then
// re-arms the final deadline on both the write and read sides. Deadline-set
// failures are logged and ignored.
func (delegate *connDelegate) Write(b []byte) (n int, err error) {
    setWriteDeadlineErr := delegate.SetWriteDeadline(time.Now().Add(delegate.socketTimeout))
    flag := isDebugLogEnabled()
    if setWriteDeadlineErr != nil && flag {
        doLog(LEVEL_DEBUG, "Failed to set write deadline with reason: %v, but it's ok", setWriteDeadlineErr)
    }

    n, err = delegate.conn.Write(b)
    finalTimeout := time.Now().Add(delegate.finalTimeout)
    setWriteDeadlineErr = delegate.SetWriteDeadline(finalTimeout)
    if setWriteDeadlineErr != nil && flag {
        doLog(LEVEL_DEBUG, "Failed to set write deadline with reason: %v, but it's ok", setWriteDeadlineErr)
    }
    setReadDeadlineErr := delegate.SetReadDeadline(finalTimeout)
    if setReadDeadlineErr != nil && flag {
        doLog(LEVEL_DEBUG, "Failed to set read deadline with reason: %v, but it's ok", setReadDeadlineErr)
    }
    return n, err
}

// The remaining net.Conn methods delegate directly to the wrapped connection.

func (delegate *connDelegate) Close() error {
    return delegate.conn.Close()
}

func (delegate *connDelegate) LocalAddr() net.Addr {
    return delegate.conn.LocalAddr()
}

func (delegate *connDelegate) RemoteAddr() net.Addr {
    return delegate.conn.RemoteAddr()
}

func (delegate *connDelegate) SetDeadline(t time.Time) error {
    return delegate.conn.SetDeadline(t)
}

func (delegate *connDelegate) SetReadDeadline(t time.Time) error {
    return delegate.conn.SetReadDeadline(t)
}

func (delegate *connDelegate) SetWriteDeadline(t time.Time) error {
    return delegate.conn.SetWriteDeadline(t)
}

+ 394
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/log.go View File

@@ -0,0 +1,394 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"fmt"
"log"
"net/http"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
)

// Level defines the level of the log. Higher values are more severe; a
// message is emitted when its level is >= the configured level.
type Level int

const (
    LEVEL_OFF   Level = 500
    LEVEL_ERROR Level = 400
    LEVEL_WARN  Level = 300
    LEVEL_INFO  Level = 200
    LEVEL_DEBUG Level = 100
)

// logLevelMap maps each level to the tag prepended to its log lines.
var logLevelMap = map[Level]string{
    LEVEL_OFF:   "[OFF]: ",
    LEVEL_ERROR: "[ERROR]: ",
    LEVEL_WARN:  "[WARN]: ",
    LEVEL_INFO:  "[INFO]: ",
    LEVEL_DEBUG: "[DEBUG]: ",
}

// logConfType holds the SDK logging configuration: threshold level, optional
// console mirroring, log file path, per-file size cap and rotation depth.
type logConfType struct {
    level        Level
    logToConsole bool
    logFullPath  string
    maxLogSize   int64
    backups      int
}

// getDefaultLogConf returns the default logging configuration: warnings only,
// no console output, no log file, 30MB per file, 10 rotated backups.
func getDefaultLogConf() logConfType {
    var conf logConfType
    conf.level = LEVEL_WARN
    conf.logToConsole = false
    conf.logFullPath = ""
    conf.maxLogSize = 30 * 1024 * 1024 // 30MB
    conf.backups = 10
    return conf
}

// logConf is the package-wide logging configuration (zero value until initialized).
var logConf logConfType

// loggerWrapper buffers log records in memory and writes them to a rotating
// log file from a dedicated background goroutine (see doWrite).
type loggerWrapper struct {
	fullPath   string         // absolute path of the active log file
	fd         *os.File       // open handle of the active log file
	ch         chan string    // channel feeding records to the writer goroutine
	wg         sync.WaitGroup // tracks the writer goroutine for doClose
	queue      []string       // records accumulated since the last flush
	logger     *log.Logger    // writes flushed records to fd
	index      int            // suffix used for the next rotated backup file
	cacheCount int            // flush threshold and channel buffer size
	closed     bool           // set by doClose; read by Printf without a lock
	loc        *time.Location // time zone for record timestamps (UTC by default)
}

// doInit prepares the wrapper for use: it defaults the time zone to UTC
// when none was configured, allocates the record channel and in-memory
// queue, wires a log.Logger to the open file, and starts the background
// writer goroutine (registered with wg so doClose can wait for it).
func (lw *loggerWrapper) doInit() {
	if lw.loc == nil {
		lw.loc = time.FixedZone("UTC", 0)
	}
	lw.ch = make(chan string, lw.cacheCount)
	lw.queue = make([]string, 0, lw.cacheCount)
	lw.logger = log.New(lw.fd, "", 0)
	lw.wg.Add(1)
	go lw.doWrite()
}

// rotate rotates the log file once it has reached logConf.maxLogSize: the
// current file is synced, closed and renamed to "<fullPath>.<index>", then
// a fresh file is opened at fullPath and handed to the logger. index wraps
// back to 1 once it exceeds logConf.backups, so old backups are overwritten.
// NOTE(review): stat/sync/rename/open failures panic; this runs on the
// writer goroutine (via doFlush) where no recover is visible, so such a
// failure would crash the process — confirm this is intended.
func (lw *loggerWrapper) rotate() {
	stat, err := lw.fd.Stat()
	if err != nil {
		_err := lw.fd.Close()
		if _err != nil {
			doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err)
		}
		panic(err)
	}
	if stat.Size() >= logConf.maxLogSize {
		_err := lw.fd.Sync()
		if _err != nil {
			panic(_err)
		}
		_err = lw.fd.Close()
		if _err != nil {
			doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err)
		}
		// Reuse backup indices once the configured backup count is exceeded.
		if lw.index > logConf.backups {
			lw.index = 1
		}
		_err = os.Rename(lw.fullPath, lw.fullPath+"."+IntToString(lw.index))
		if _err != nil {
			panic(_err)
		}
		lw.index++

		fd, err := os.OpenFile(lw.fullPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
		if err != nil {
			panic(err)
		}
		lw.fd = fd
		lw.logger.SetOutput(lw.fd)
	}
}

// doFlush writes every queued record to the log file and forces the data
// to disk. rotate runs first so that, when the current file is already over
// the size limit, the flush lands in a freshly rotated file. A failed fsync
// panics. The caller is responsible for clearing lw.queue afterwards.
func (lw *loggerWrapper) doFlush() {
	lw.rotate()
	for _, m := range lw.queue {
		lw.logger.Println(m)
	}
	err := lw.fd.Sync()
	if err != nil {
		panic(err)
	}
}

// doClose marks the logger closed, closes the record channel so the writer
// goroutine drains its queue and exits, then blocks until it has finished.
// NOTE(review): closed is written here and read by Printf without any
// synchronization; a concurrent Printf can still send on the closed channel
// (the resulting panic is swallowed by doLog's deferred recover).
func (lw *loggerWrapper) doClose() {
	lw.closed = true
	close(lw.ch)
	lw.wg.Wait()
}

// doWrite is the writer-goroutine loop. It accumulates incoming records in
// lw.queue, flushing to disk whenever the queue reaches cacheCount entries.
// When the channel is closed (by doClose) it performs a final flush, closes
// the file, and exits; wg.Done signals completion to doClose.
func (lw *loggerWrapper) doWrite() {
	defer lw.wg.Done()
	for msg := range lw.ch {
		if len(lw.queue) >= lw.cacheCount {
			lw.doFlush()
			lw.queue = make([]string, 0, lw.cacheCount)
		}
		lw.queue = append(lw.queue, msg)
	}
	// Channel closed: flush whatever remains and release the file handle.
	lw.doFlush()
	if err := lw.fd.Close(); err != nil {
		doLog(LEVEL_WARN, "Failed to close file with reason: %v", err)
	}
}

// Printf formats the record and hands it to the background writer goroutine.
// NOTE(review): closed is read here without synchronization, so a send
// racing with doClose may hit the already-closed channel and panic; doLog's
// deferred recover absorbs that panic.
func (lw *loggerWrapper) Printf(format string, v ...interface{}) {
	if !lw.closed {
		msg := fmt.Sprintf(format, v...)
		lw.ch <- msg
	}
}

// consoleLogger writes records to stdout when console logging is enabled.
var consoleLogger *log.Logger

// fileLogger buffers and writes records to the configured log file.
var fileLogger *loggerWrapper

// lock guards initialization and teardown of the two loggers above.
var lock = new(sync.RWMutex)

// isDebugLogEnabled reports whether DEBUG-level records would be recorded.
func isDebugLogEnabled() bool {
	return logConf.level <= LEVEL_DEBUG
}

// isErrorLogEnabled reports whether ERROR-level records would be recorded.
func isErrorLogEnabled() bool {
	return logConf.level <= LEVEL_ERROR
}

// isWarnLogEnabled reports whether WARN-level records would be recorded.
func isWarnLogEnabled() bool {
	return logConf.level <= LEVEL_WARN
}

// isInfoLogEnabled reports whether INFO-level records would be recorded.
func isInfoLogEnabled() bool {
	return logConf.level <= LEVEL_INFO
}

// reset tears down both loggers (draining and closing the file logger) and
// restores the default configuration. Callers must hold lock.
func reset() {
	if fileLogger != nil {
		fileLogger.doClose()
		fileLogger = nil
	}
	consoleLogger = nil
	logConf = getDefaultLogConf()
}

// logConfig is a functional option applied to the file logger during InitLog.
type logConfig func(lw *loggerWrapper)

// WithLoggerTimeLoc returns an option that sets the time zone used for the
// timestamps of file log records (doInit defaults it to UTC when unset).
func WithLoggerTimeLoc(loc *time.Location) logConfig {
	return func(lw *loggerWrapper) {
		lw.loc = loc
	}
}

// InitLog enables the logging function with the default cache count of 50
// records. See InitLogWithCacheCnt for the meaning of the other parameters.
func InitLog(logFullPath string, maxLogSize int64, backups int, level Level, logToConsole bool, logConfigs ...logConfig) error {

	return InitLogWithCacheCnt(logFullPath, maxLogSize, backups, level, logToConsole, 50, logConfigs...)
}

// InitLogWithCacheCnt enables the logging function, replacing any previous
// logging state (reset is called first).
//
// logFullPath: log file path; empty disables file logging. A ".log" suffix
// is appended when missing. maxLogSize: rotation threshold in bytes; values
// <= 0 keep the default (30 MB). backups: rotated backups to keep; values
// <= 0 keep the default (10). level: minimum Level to record. logToConsole:
// additionally log to stdout. cacheCnt: records buffered before a flush;
// values <= 0 fall back to 50. logConfigs: optional settings such as
// WithLoggerTimeLoc.
func InitLogWithCacheCnt(logFullPath string, maxLogSize int64, backups int, level Level, logToConsole bool, cacheCnt int, logConfigs ...logConfig) error {
	lock.Lock()
	defer lock.Unlock()
	if cacheCnt <= 0 {
		cacheCnt = 50
	}
	reset()
	if fullPath := strings.TrimSpace(logFullPath); fullPath != "" {
		_fullPath, err := filepath.Abs(fullPath)
		if err != nil {
			return err
		}

		if !strings.HasSuffix(_fullPath, ".log") {
			_fullPath += ".log"
		}

		stat, fd, err := initLogFile(_fullPath)
		if err != nil {
			return err
		}

		// Scan the log directory for existing backups ("<name>.<n>") and
		// resume numbering after the most recently modified one.
		prefix := stat.Name() + "."
		index := 1
		var timeIndex int64 = 0
		walkFunc := func(path string, info os.FileInfo, err error) error {
			if err == nil {
				if name := info.Name(); strings.HasPrefix(name, prefix) {
					if i := StringToInt(name[len(prefix):], 0); i >= index && info.ModTime().Unix() >= timeIndex {
						timeIndex = info.ModTime().Unix()
						index = i + 1
					}
				}
			}
			return err
		}

		if err = filepath.Walk(filepath.Dir(_fullPath), walkFunc); err != nil {
			_err := fd.Close()
			if _err != nil {
				doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err)
			}
			return err
		}

		fileLogger = &loggerWrapper{fullPath: _fullPath, fd: fd, index: index, cacheCount: cacheCnt, closed: false}
		for _, logConfig := range logConfigs {
			logConfig(fileLogger)
		}
		fileLogger.doInit()
	}
	if maxLogSize > 0 {
		logConf.maxLogSize = maxLogSize
	}
	if backups > 0 {
		logConf.backups = backups
	}
	logConf.level = level
	if logToConsole {
		consoleLogger = log.New(os.Stdout, "", log.LstdFlags)
	}
	return nil
}

// initLogFile ensures the log file's directory exists, opens (creating if
// necessary) the file for appending, and returns its FileInfo together with
// the open handle. It fails when _fullPath names a directory, when the
// directory cannot be created, or when the file cannot be opened or stat'ed;
// on error both returned values are nil and the caller owns no handle.
func initLogFile(_fullPath string) (os.FileInfo, *os.File, error) {
	stat, err := os.Stat(_fullPath)
	if err == nil && stat.IsDir() {
		return nil, nil, fmt.Errorf("logFullPath:[%s] is a directory", _fullPath)
	} else if err = os.MkdirAll(filepath.Dir(_fullPath), os.ModePerm); err != nil {
		return nil, nil, err
	}

	fd, err := os.OpenFile(_fullPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
	if err != nil {
		// BUG FIX: fd is nil when OpenFile fails; the previous code called
		// fd.Close() anyway, which returns os.ErrInvalid and produced a
		// spurious WARN on every open failure. Just return the open error.
		return nil, nil, err
	}

	// stat is nil when the file did not exist before; re-stat the newly
	// created file so the caller gets a valid FileInfo.
	if stat == nil {
		stat, err = os.Stat(_fullPath)
		if err != nil {
			_err := fd.Close()
			if _err != nil {
				doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err)
			}
			return nil, nil, err
		}
	}

	return stat, fd, nil
}

// CloseLog disables logging and synchronizes cached records to the log file
// (reset drains and closes the file logger).
// NOTE(review): logEnabled() is read before the lock is acquired, so this
// check can race with a concurrent InitLog/CloseLog — confirm callers
// serialize init/close externally.
func CloseLog() {
	if logEnabled() {
		lock.Lock()
		defer lock.Unlock()
		reset()
	}
}

// logEnabled reports whether at least one of the console or file loggers
// is currently configured.
func logEnabled() bool {
	return consoleLogger != nil || fileLogger != nil
}

// DoLog writes log messages to the logger.
// Note: doLog attributes records via runtime.Caller(1), so messages sent
// through this exported wrapper are attributed to this file's doLog call
// site rather than the user's call site.
func DoLog(level Level, format string, v ...interface{}) {
	doLog(level, format, v...)
}

// doLog formats one record, prefixes it with the caller's file:line and the
// level tag, and dispatches it to whichever of the console and file loggers
// are configured. Records below the configured level are dropped. The
// deferred recover absorbs the panic from a Printf racing with CloseLog
// (send on closed channel).
func doLog(level Level, format string, v ...interface{}) {
	if logEnabled() && logConf.level <= level {
		msg := fmt.Sprintf(format, v...)
		if _, file, line, ok := runtime.Caller(1); ok {
			index := strings.LastIndex(file, "/")
			if index >= 0 {
				file = file[index+1:]
			}
			msg = fmt.Sprintf("%s:%d|%s", file, line, msg)
		}
		prefix := logLevelMap[level]
		defer func() {
			_ = recover()
			// ignore ch closed error
		}()
		if consoleLogger != nil {
			consoleLogger.Printf("%s%s", prefix, msg)
		}
		if fileLogger != nil {
			// BUG FIX: the timestamp was previously computed unconditionally
			// from fileLogger.loc. With console-only logging fileLogger is
			// nil, so that dereference panicked and the deferred recover
			// silently dropped the console output. Compute it only when the
			// file logger exists.
			nowDate := FormatNowWithLoc("2006-01-02T15:04:05.000ZZ", fileLogger.loc)
			fileLogger.Printf("%s %s%s", nowDate, prefix, msg)
		}
	}
}

// checkAndLogErr logs the formatted message at the given level only when
// err is non-nil. Note that err itself is not appended to the output;
// callers embed any relevant detail in format/v.
func checkAndLogErr(err error, level Level, format string, v ...interface{}) {
	if err != nil {
		doLog(level, format, v...)
	}
}

// logResponseHeader renders the subset of response headers that are safe to
// log as one space-separated "Name: [firstValue]" string. A header is kept
// when its lower-cased name — after stripping the vendor prefix
// (HEADER_PREFIX / HEADER_PREFIX_OBS) — appears in
// allowedLogResponseHTTPHeaderNames, or when it is the request-id header.
func logResponseHeader(respHeader http.Header) string {
	entries := make([]string, 0, len(respHeader)+1)
	for name, values := range respHeader {
		name = strings.TrimSpace(name)
		if name == "" {
			continue
		}
		lowered := strings.ToLower(name)
		if strings.HasPrefix(lowered, HEADER_PREFIX) || strings.HasPrefix(lowered, HEADER_PREFIX_OBS) {
			lowered = lowered[len(HEADER_PREFIX):]
		}
		_, allowed := allowedLogResponseHTTPHeaderNames[lowered]
		if allowed {
			entries = append(entries, fmt.Sprintf("%s: [%s]", name, values[0]))
		}
		if lowered == HEADER_REQUEST_ID {
			entries = append(entries, fmt.Sprintf("%s: [%s]", name, values[0]))
		}
	}
	return strings.Join(entries, " ")
}

// logRequestHeader renders the request headers that are allowed to be
// logged as one space-separated "Name: [firstValue]" string; membership is
// decided by allowedRequestHTTPHeaderMetadataNames keyed on the lower-cased
// header name.
func logRequestHeader(reqHeader http.Header) string {
	entries := make([]string, 0, len(reqHeader)+1)
	for name, values := range reqHeader {
		name = strings.TrimSpace(name)
		if name == "" {
			continue
		}
		if _, allowed := allowedRequestHTTPHeaderMetadataNames[strings.ToLower(name)]; allowed {
			entries = append(entries, fmt.Sprintf("%s: [%s]", name, values[0]))
		}
	}
	return strings.Join(entries, " ")
}

+ 404
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/mime.go View File

@@ -0,0 +1,404 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

// mimeTypes maps a file extension (without the leading dot) to the
// Content-Type reported for objects with that extension. Entries are mostly
// alphabetical; later additions were appended at the end.
var mimeTypes = map[string]string{
	"001":     "application/x-001",
	"301":     "application/x-301",
	"323":     "text/h323",
	"7z":      "application/x-7z-compressed",
	"906":     "application/x-906",
	"907":     "drawing/907",
	"IVF":     "video/x-ivf",
	"a11":     "application/x-a11",
	"aac":     "audio/x-aac",
	"acp":     "audio/x-mei-aac",
	"ai":      "application/postscript",
	"aif":     "audio/aiff",
	"aifc":    "audio/aiff",
	"aiff":    "audio/aiff",
	"anv":     "application/x-anv",
	"apk":     "application/vnd.android.package-archive",
	"asa":     "text/asa",
	"asf":     "video/x-ms-asf",
	"asp":     "text/asp",
	"asx":     "video/x-ms-asf",
	"atom":    "application/atom+xml",
	"au":      "audio/basic",
	"avi":     "video/avi",
	"awf":     "application/vnd.adobe.workflow",
	"biz":     "text/xml",
	"bmp":     "application/x-bmp",
	"bot":     "application/x-bot",
	"bz2":     "application/x-bzip2",
	"c4t":     "application/x-c4t",
	"c90":     "application/x-c90",
	"cal":     "application/x-cals",
	"cat":     "application/vnd.ms-pki.seccat",
	"cdf":     "application/x-netcdf",
	"cdr":     "application/x-cdr",
	"cel":     "application/x-cel",
	"cer":     "application/x-x509-ca-cert",
	"cg4":     "application/x-g4",
	"cgm":     "application/x-cgm",
	"cit":     "application/x-cit",
	"class":   "java/*",
	"cml":     "text/xml",
	"cmp":     "application/x-cmp",
	"cmx":     "application/x-cmx",
	"cot":     "application/x-cot",
	"crl":     "application/pkix-crl",
	"crt":     "application/x-x509-ca-cert",
	"csi":     "application/x-csi",
	"css":     "text/css",
	"csv":     "text/csv",
	"cu":      "application/cu-seeme",
	"cut":     "application/x-cut",
	"dbf":     "application/x-dbf",
	"dbm":     "application/x-dbm",
	"dbx":     "application/x-dbx",
	"dcd":     "text/xml",
	"dcx":     "application/x-dcx",
	"deb":     "application/x-debian-package",
	"der":     "application/x-x509-ca-cert",
	"dgn":     "application/x-dgn",
	"dib":     "application/x-dib",
	"dll":     "application/x-msdownload",
	"doc":     "application/msword",
	"docx":    "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
	"dot":     "application/msword",
	"drw":     "application/x-drw",
	"dtd":     "text/xml",
	"dvi":     "application/x-dvi",
	"dwf":     "application/x-dwf",
	"dwg":     "application/x-dwg",
	"dxb":     "application/x-dxb",
	"dxf":     "application/x-dxf",
	"edn":     "application/vnd.adobe.edn",
	"emf":     "application/x-emf",
	"eml":     "message/rfc822",
	"ent":     "text/xml",
	"eot":     "application/vnd.ms-fontobject",
	"epi":     "application/x-epi",
	"eps":     "application/postscript",
	"epub":    "application/epub+zip",
	"etd":     "application/x-ebx",
	"etx":     "text/x-setext",
	"exe":     "application/x-msdownload",
	"fax":     "image/fax",
	"fdf":     "application/vnd.fdf",
	"fif":     "application/fractals",
	"flac":    "audio/flac",
	"flv":     "video/x-flv",
	"fo":      "text/xml",
	"frm":     "application/x-frm",
	"g4":      "application/x-g4",
	"gbr":     "application/x-gbr",
	"gif":     "image/gif",
	"gl2":     "application/x-gl2",
	"gp4":     "application/x-gp4",
	"gz":      "application/gzip",
	"hgl":     "application/x-hgl",
	"hmr":     "application/x-hmr",
	"hpg":     "application/x-hpgl",
	"hpl":     "application/x-hpl",
	"hqx":     "application/mac-binhex40",
	"hrf":     "application/x-hrf",
	"hta":     "application/hta",
	"htc":     "text/x-component",
	"htm":     "text/html",
	"html":    "text/html",
	"htt":     "text/webviewhtml",
	"htx":     "text/html",
	"icb":     "application/x-icb",
	"ico":     "application/x-ico",
	"ics":     "text/calendar",
	"iff":     "application/x-iff",
	"ig4":     "application/x-g4",
	"igs":     "application/x-igs",
	"iii":     "application/x-iphone",
	"img":     "application/x-img",
	"ini":     "text/plain",
	"ins":     "application/x-internet-signup",
	"ipa":     "application/vnd.iphone",
	"iso":     "application/x-iso9660-image",
	"isp":     "application/x-internet-signup",
	"jar":     "application/java-archive",
	"java":    "java/*",
	"jfif":    "image/jpeg",
	"jpe":     "image/jpeg",
	"jpeg":    "image/jpeg",
	"jpg":     "image/jpeg",
	"js":      "application/x-javascript",
	"json":    "application/json",
	"jsp":     "text/html",
	"la1":     "audio/x-liquid-file",
	"lar":     "application/x-laplayer-reg",
	"latex":   "application/x-latex",
	"lavs":    "audio/x-liquid-secure",
	"lbm":     "application/x-lbm",
	"lmsff":   "audio/x-la-lms",
	"log":     "text/plain",
	"ls":      "application/x-javascript",
	"ltr":     "application/x-ltr",
	"m1v":     "video/x-mpeg",
	"m2v":     "video/x-mpeg",
	"m3u":     "audio/mpegurl",
	"m4a":     "audio/mp4",
	"m4e":     "video/mpeg4",
	"m4v":     "video/mp4",
	"mac":     "application/x-mac",
	"man":     "application/x-troff-man",
	"math":    "text/xml",
	"mdb":     "application/msaccess",
	"mfp":     "application/x-shockwave-flash",
	"mht":     "message/rfc822",
	"mhtml":   "message/rfc822",
	"mi":      "application/x-mi",
	"mid":     "audio/mid",
	"midi":    "audio/mid",
	"mil":     "application/x-mil",
	"mml":     "text/xml",
	"mnd":     "audio/x-musicnet-download",
	"mns":     "audio/x-musicnet-stream",
	"mocha":   "application/x-javascript",
	"mov":     "video/quicktime",
	"movie":   "video/x-sgi-movie",
	"mp1":     "audio/mp1",
	"mp2":     "audio/mp2",
	"mp2v":    "video/mpeg",
	"mp3":     "audio/mp3",
	"mp4":     "video/mp4",
	"mp4a":    "audio/mp4",
	"mp4v":    "video/mp4",
	"mpa":     "video/x-mpg",
	"mpd":     "application/vnd.ms-project",
	"mpe":     "video/mpeg",
	"mpeg":    "video/mpeg",
	"mpg":     "video/mpeg",
	"mpg4":    "video/mp4",
	"mpga":    "audio/rn-mpeg",
	"mpp":     "application/vnd.ms-project",
	"mps":     "video/x-mpeg",
	"mpt":     "application/vnd.ms-project",
	"mpv":     "video/mpg",
	"mpv2":    "video/mpeg",
	"mpw":     "application/vnd.ms-project",
	"mpx":     "application/vnd.ms-project",
	"mtx":     "text/xml",
	"mxp":     "application/x-mmxp",
	"net":     "image/pnetvue",
	"nrf":     "application/x-nrf",
	"nws":     "message/rfc822",
	"odc":     "text/x-ms-odc",
	"oga":     "audio/ogg",
	"ogg":     "audio/ogg",
	"ogv":     "video/ogg",
	"ogx":     "application/ogg",
	"out":     "application/x-out",
	"p10":     "application/pkcs10",
	"p12":     "application/x-pkcs12",
	"p7b":     "application/x-pkcs7-certificates",
	"p7c":     "application/pkcs7-mime",
	"p7m":     "application/pkcs7-mime",
	"p7r":     "application/x-pkcs7-certreqresp",
	"p7s":     "application/pkcs7-signature",
	"pbm":     "image/x-portable-bitmap",
	"pc5":     "application/x-pc5",
	"pci":     "application/x-pci",
	"pcl":     "application/x-pcl",
	"pcx":     "application/x-pcx",
	"pdf":     "application/pdf",
	"pdx":     "application/vnd.adobe.pdx",
	"pfx":     "application/x-pkcs12",
	"pgl":     "application/x-pgl",
	"pgm":     "image/x-portable-graymap",
	"pic":     "application/x-pic",
	"pko":     "application/vnd.ms-pki.pko",
	"pl":      "application/x-perl",
	"plg":     "text/html",
	"pls":     "audio/scpls",
	"plt":     "application/x-plt",
	"png":     "image/png",
	"pnm":     "image/x-portable-anymap",
	"pot":     "application/vnd.ms-powerpoint",
	"ppa":     "application/vnd.ms-powerpoint",
	"ppm":     "application/x-ppm",
	"pps":     "application/vnd.ms-powerpoint",
	"ppt":     "application/vnd.ms-powerpoint",
	"pptx":    "application/vnd.openxmlformats-officedocument.presentationml.presentation",
	"pr":      "application/x-pr",
	"prf":     "application/pics-rules",
	"prn":     "application/x-prn",
	"prt":     "application/x-prt",
	"ps":      "application/postscript",
	"ptn":     "application/x-ptn",
	"pwz":     "application/vnd.ms-powerpoint",
	"qt":      "video/quicktime",
	"r3t":     "text/vnd.rn-realtext3d",
	"ra":      "audio/vnd.rn-realaudio",
	"ram":     "audio/x-pn-realaudio",
	"rar":     "application/x-rar-compressed",
	"ras":     "application/x-ras",
	"rat":     "application/rat-file",
	"rdf":     "text/xml",
	"rec":     "application/vnd.rn-recording",
	"red":     "application/x-red",
	"rgb":     "application/x-rgb",
	"rjs":     "application/vnd.rn-realsystem-rjs",
	"rjt":     "application/vnd.rn-realsystem-rjt",
	"rlc":     "application/x-rlc",
	"rle":     "application/x-rle",
	"rm":      "application/vnd.rn-realmedia",
	"rmf":     "application/vnd.adobe.rmf",
	"rmi":     "audio/mid",
	"rmj":     "application/vnd.rn-realsystem-rmj",
	"rmm":     "audio/x-pn-realaudio",
	"rmp":     "application/vnd.rn-rn_music_package",
	"rms":     "application/vnd.rn-realmedia-secure",
	"rmvb":    "application/vnd.rn-realmedia-vbr",
	"rmx":     "application/vnd.rn-realsystem-rmx",
	"rnx":     "application/vnd.rn-realplayer",
	"rp":      "image/vnd.rn-realpix",
	"rpm":     "audio/x-pn-realaudio-plugin",
	"rsml":    "application/vnd.rn-rsml",
	"rss":     "application/rss+xml",
	"rt":      "text/vnd.rn-realtext",
	"rtf":     "application/x-rtf",
	"rv":      "video/vnd.rn-realvideo",
	"sam":     "application/x-sam",
	"sat":     "application/x-sat",
	"sdp":     "application/sdp",
	"sdw":     "application/x-sdw",
	"sgm":     "text/sgml",
	"sgml":    "text/sgml",
	"sis":     "application/vnd.symbian.install",
	"sisx":    "application/vnd.symbian.install",
	"sit":     "application/x-stuffit",
	"slb":     "application/x-slb",
	"sld":     "application/x-sld",
	"slk":     "drawing/x-slk",
	"smi":     "application/smil",
	"smil":    "application/smil",
	"smk":     "application/x-smk",
	"snd":     "audio/basic",
	"sol":     "text/plain",
	"sor":     "text/plain",
	"spc":     "application/x-pkcs7-certificates",
	"spl":     "application/futuresplash",
	"spp":     "text/xml",
	"ssm":     "application/streamingmedia",
	"sst":     "application/vnd.ms-pki.certstore",
	"stl":     "application/vnd.ms-pki.stl",
	"stm":     "text/html",
	"sty":     "application/x-sty",
	"svg":     "image/svg+xml",
	"swf":     "application/x-shockwave-flash",
	"tar":     "application/x-tar",
	"tdf":     "application/x-tdf",
	"tg4":     "application/x-tg4",
	"tga":     "application/x-tga",
	"tif":     "image/tiff",
	"tiff":    "image/tiff",
	"tld":     "text/xml",
	"top":     "drawing/x-top",
	"torrent": "application/x-bittorrent",
	"tsd":     "text/xml",
	"ttf":     "application/x-font-ttf",
	"txt":     "text/plain",
	"uin":     "application/x-icq",
	"uls":     "text/iuls",
	"vcf":     "text/x-vcard",
	"vda":     "application/x-vda",
	"vdx":     "application/vnd.visio",
	"vml":     "text/xml",
	"vpg":     "application/x-vpeg005",
	"vsd":     "application/vnd.visio",
	"vss":     "application/vnd.visio",
	"vst":     "application/x-vst",
	"vsw":     "application/vnd.visio",
	"vsx":     "application/vnd.visio",
	"vtx":     "application/vnd.visio",
	"vxml":    "text/xml",
	"wav":     "audio/wav",
	"wax":     "audio/x-ms-wax",
	"wb1":     "application/x-wb1",
	"wb2":     "application/x-wb2",
	"wb3":     "application/x-wb3",
	"wbmp":    "image/vnd.wap.wbmp",
	"webm":    "video/webm",
	"wiz":     "application/msword",
	"wk3":     "application/x-wk3",
	"wk4":     "application/x-wk4",
	"wkq":     "application/x-wkq",
	"wks":     "application/x-wks",
	"wm":      "video/x-ms-wm",
	"wma":     "audio/x-ms-wma",
	"wmd":     "application/x-ms-wmd",
	"wmf":     "application/x-wmf",
	"wml":     "text/vnd.wap.wml",
	"wmv":     "video/x-ms-wmv",
	"wmx":     "video/x-ms-wmx",
	"wmz":     "application/x-ms-wmz",
	"woff":    "application/x-font-woff",
	"wp6":     "application/x-wp6",
	"wpd":     "application/x-wpd",
	"wpg":     "application/x-wpg",
	"wpl":     "application/vnd.ms-wpl",
	"wq1":     "application/x-wq1",
	"wr1":     "application/x-wr1",
	"wri":     "application/x-wri",
	"wrk":     "application/x-wrk",
	"ws":      "application/x-ws",
	"ws2":     "application/x-ws",
	"wsc":     "text/scriptlet",
	"wsdl":    "text/xml",
	"wvx":     "video/x-ms-wvx",
	"x_b":     "application/x-x_b",
	"x_t":     "application/x-x_t",
	"xap":     "application/x-silverlight-app",
	"xbm":     "image/x-xbitmap",
	"xdp":     "application/vnd.adobe.xdp",
	"xdr":     "text/xml",
	"xfd":     "application/vnd.adobe.xfd",
	"xfdf":    "application/vnd.adobe.xfdf",
	"xhtml":   "text/html",
	"xls":     "application/vnd.ms-excel",
	"xlsx":    "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
	"xlw":     "application/x-xlw",
	"xml":     "text/xml",
	"xpl":     "audio/scpls",
	"xpm":     "image/x-xpixmap",
	"xq":      "text/xml",
	"xql":     "text/xml",
	"xquery":  "text/xml",
	"xsd":     "text/xml",
	"xsl":     "text/xml",
	"xslt":    "text/xml",
	"xwd":     "application/x-xwd",
	"yaml":    "text/yaml",
	"yml":     "text/yaml",
	"zip":     "application/zip",
	"dotx":    "application/vnd.openxmlformats-officedocument.wordprocessingml.template",
	"wps":     "application/vnd.ms-works",
	"wpt":     "x-lml/x-gps",
	"pptm":    "application/vnd.ms-powerpoint.presentation.macroenabled.12",
	"heic":    "image/heic",
	"mkv":     "video/x-matroska",
	"raw":     "image/x-panasonic-raw",
	"webp":    "image/webp",
	"3gp":     "audio/3gpp",
	"3g2":     "audio/3gpp2",
	"weba":    "audio/webm",
	"woff2":   "font/woff2",
}

+ 407
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_base.go View File

@@ -0,0 +1,407 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"encoding/xml"
"time"
)

// Bucket defines bucket properties
type Bucket struct {
	XMLName      xml.Name  `xml:"Bucket"`
	Name         string    `xml:"Name"`
	CreationDate time.Time `xml:"CreationDate"`
	Location     string    `xml:"Location"`
	BucketType   string    `xml:"BucketType,omitempty"`
}

// Owner defines owner properties
type Owner struct {
	XMLName     xml.Name `xml:"Owner"`
	ID          string   `xml:"ID"`
	DisplayName string   `xml:"DisplayName,omitempty"`
}

// Initiator defines initiator properties
type Initiator struct {
	XMLName     xml.Name `xml:"Initiator"`
	ID          string   `xml:"ID"`
	DisplayName string   `xml:"DisplayName,omitempty"`
}

// bucketLocationObs is the OBS-protocol form of a bucket-location response:
// the location is the character data of a bare <Location> element.
type bucketLocationObs struct {
	XMLName  xml.Name `xml:"Location"`
	Location string   `xml:",chardata"`
}

// BucketLocation defines bucket location configuration
type BucketLocation struct {
	XMLName  xml.Name `xml:"CreateBucketConfiguration"`
	Location string   `xml:"LocationConstraint,omitempty"`
}

// BucketStoragePolicy defines the bucket storage class
type BucketStoragePolicy struct {
	XMLName      xml.Name         `xml:"StoragePolicy"`
	StorageClass StorageClassType `xml:"DefaultStorageClass"`
}

// bucketStoragePolicyObs is the OBS-protocol form of the bucket storage
// class: the class name is the character data of a <StorageClass> element.
type bucketStoragePolicyObs struct {
	XMLName      xml.Name `xml:"StorageClass"`
	StorageClass string   `xml:",chardata"`
}

// Content defines the object content properties
type Content struct {
	XMLName      xml.Name         `xml:"Contents"`
	Owner        Owner            `xml:"Owner"`
	ETag         string           `xml:"ETag"`
	Key          string           `xml:"Key"`
	LastModified time.Time        `xml:"LastModified"`
	Size         int64            `xml:"Size"`
	StorageClass StorageClassType `xml:"StorageClass"`
}

// Version defines the properties of versioning objects.
// It embeds DeleteMarker to reuse its Key/VersionId/IsLatest/LastModified/
// Owner/StorageClass fields; Version's own XMLName shadows the embedded one.
type Version struct {
	DeleteMarker
	XMLName xml.Name `xml:"Version"`
	ETag    string   `xml:"ETag"`
	Size    int64    `xml:"Size"`
}

// DeleteMarker defines the properties of versioning delete markers
type DeleteMarker struct {
	XMLName      xml.Name         `xml:"DeleteMarker"`
	Key          string           `xml:"Key"`
	VersionId    string           `xml:"VersionId"`
	IsLatest     bool             `xml:"IsLatest"`
	LastModified time.Time        `xml:"LastModified"`
	Owner        Owner            `xml:"Owner"`
	StorageClass StorageClassType `xml:"StorageClass"`
}

// Upload defines multipart upload properties
type Upload struct {
	XMLName      xml.Name         `xml:"Upload"`
	Key          string           `xml:"Key"`
	UploadId     string           `xml:"UploadId"`
	Initiated    time.Time        `xml:"Initiated"`
	StorageClass StorageClassType `xml:"StorageClass"`
	Owner        Owner            `xml:"Owner"`
	Initiator    Initiator        `xml:"Initiator"`
}

// BucketQuota defines bucket quota configuration
type BucketQuota struct {
	XMLName xml.Name `xml:"Quota"`
	Quota   int64    `xml:"StorageQuota"`
}

// Grantee defines grantee properties
type Grantee struct {
	XMLName     xml.Name     `xml:"Grantee"`
	Type        GranteeType  `xml:"type,attr"`
	ID          string       `xml:"ID,omitempty"`
	DisplayName string       `xml:"DisplayName,omitempty"`
	URI         GroupUriType `xml:"URI,omitempty"`
}

// granteeObs is the OBS-protocol variant of Grantee; group grants are
// expressed through the Canned element instead of a URI.
type granteeObs struct {
	XMLName     xml.Name    `xml:"Grantee"`
	Type        GranteeType `xml:"type,attr"`
	ID          string      `xml:"ID,omitempty"`
	DisplayName string      `xml:"DisplayName,omitempty"`
	Canned      string      `xml:"Canned,omitempty"`
}

// Grant defines grant properties
type Grant struct {
	XMLName    xml.Name       `xml:"Grant"`
	Grantee    Grantee        `xml:"Grantee"`
	Permission PermissionType `xml:"Permission"`
	Delivered  bool           `xml:"Delivered"`
}

// grantObs is the OBS-protocol variant of Grant (uses granteeObs).
type grantObs struct {
	XMLName    xml.Name       `xml:"Grant"`
	Grantee    granteeObs     `xml:"Grantee"`
	Permission PermissionType `xml:"Permission"`
	Delivered  bool           `xml:"Delivered"`
}

// AccessControlPolicy defines access control policy properties
type AccessControlPolicy struct {
	XMLName   xml.Name `xml:"AccessControlPolicy"`
	Owner     Owner    `xml:"Owner"`
	Grants    []Grant  `xml:"AccessControlList>Grant"`
	Delivered string   `xml:"Delivered,omitempty"`
}

// accessControlPolicyObs is the OBS-protocol variant of AccessControlPolicy
// (uses grantObs and carries no top-level Delivered element).
type accessControlPolicyObs struct {
	XMLName xml.Name   `xml:"AccessControlPolicy"`
	Owner   Owner      `xml:"Owner"`
	Grants  []grantObs `xml:"AccessControlList>Grant"`
}

// CorsRule defines the CORS rules
type CorsRule struct {
	XMLName       xml.Name `xml:"CORSRule"`
	ID            string   `xml:"ID,omitempty"`
	AllowedOrigin []string `xml:"AllowedOrigin"`
	AllowedMethod []string `xml:"AllowedMethod"`
	AllowedHeader []string `xml:"AllowedHeader,omitempty"`
	MaxAgeSeconds int      `xml:"MaxAgeSeconds"`
	ExposeHeader  []string `xml:"ExposeHeader,omitempty"`
}

// BucketCors defines the bucket CORS configuration
type BucketCors struct {
	XMLName   xml.Name   `xml:"CORSConfiguration"`
	CorsRules []CorsRule `xml:"CORSRule"`
}

// BucketVersioningConfiguration defines the versioning configuration
type BucketVersioningConfiguration struct {
	XMLName xml.Name             `xml:"VersioningConfiguration"`
	Status  VersioningStatusType `xml:"Status"`
}

// IndexDocument defines the default page configuration
type IndexDocument struct {
	Suffix string `xml:"Suffix"`
}

// ErrorDocument defines the error page configuration
type ErrorDocument struct {
	Key string `xml:"Key,omitempty"`
}

// Condition defines condition in RoutingRule
type Condition struct {
	XMLName                     xml.Name `xml:"Condition"`
	KeyPrefixEquals             string   `xml:"KeyPrefixEquals,omitempty"`
	HttpErrorCodeReturnedEquals string   `xml:"HttpErrorCodeReturnedEquals,omitempty"`
}

// Redirect defines redirect in RoutingRule
type Redirect struct {
	XMLName              xml.Name     `xml:"Redirect"`
	Protocol             ProtocolType `xml:"Protocol,omitempty"`
	HostName             string       `xml:"HostName,omitempty"`
	ReplaceKeyPrefixWith string       `xml:"ReplaceKeyPrefixWith,omitempty"`
	ReplaceKeyWith       string       `xml:"ReplaceKeyWith,omitempty"`
	HttpRedirectCode     string       `xml:"HttpRedirectCode,omitempty"`
}

// RoutingRule defines routing rules
type RoutingRule struct {
	XMLName   xml.Name  `xml:"RoutingRule"`
	Condition Condition `xml:"Condition,omitempty"`
	Redirect  Redirect  `xml:"Redirect"`
}

// RedirectAllRequestsTo defines redirect in BucketWebsiteConfiguration
type RedirectAllRequestsTo struct {
	XMLName  xml.Name     `xml:"RedirectAllRequestsTo"`
	Protocol ProtocolType `xml:"Protocol,omitempty"`
	HostName string       `xml:"HostName"`
}

// BucketWebsiteConfiguration defines the bucket website configuration
type BucketWebsiteConfiguration struct {
	XMLName               xml.Name              `xml:"WebsiteConfiguration"`
	RedirectAllRequestsTo RedirectAllRequestsTo `xml:"RedirectAllRequestsTo,omitempty"`
	IndexDocument         IndexDocument         `xml:"IndexDocument,omitempty"`
	ErrorDocument         ErrorDocument         `xml:"ErrorDocument,omitempty"`
	RoutingRules          []RoutingRule         `xml:"RoutingRules>RoutingRule,omitempty"`
}

// BucketLoggingStatus defines the bucket logging configuration
type BucketLoggingStatus struct {
	XMLName      xml.Name `xml:"BucketLoggingStatus"`
	Agency       string   `xml:"Agency,omitempty"`
	TargetBucket string   `xml:"LoggingEnabled>TargetBucket,omitempty"`
	TargetPrefix string   `xml:"LoggingEnabled>TargetPrefix,omitempty"`
	TargetGrants []Grant  `xml:"LoggingEnabled>TargetGrants>Grant,omitempty"`
}

// Transition defines transition property in LifecycleRule
type Transition struct {
	XMLName      xml.Name         `xml:"Transition" json:"-"`
	Date         time.Time        `xml:"Date,omitempty"`
	Days         int              `xml:"Days,omitempty"`
	StorageClass StorageClassType `xml:"StorageClass"`
}

// Expiration defines expiration property in LifecycleRule
type Expiration struct {
	XMLName                   xml.Name  `xml:"Expiration" json:"-"`
	Date                      time.Time `xml:"Date,omitempty"`
	Days                      int       `xml:"Days,omitempty"`
	ExpiredObjectDeleteMarker string    `xml:"ExpiredObjectDeleteMarker,omitempty"`
}

// NoncurrentVersionTransition defines noncurrentVersion transition property in LifecycleRule
type NoncurrentVersionTransition struct {
	XMLName        xml.Name         `xml:"NoncurrentVersionTransition" json:"-"`
	NoncurrentDays int              `xml:"NoncurrentDays"`
	StorageClass   StorageClassType `xml:"StorageClass"`
}

// NoncurrentVersionExpiration defines noncurrentVersion expiration property in LifecycleRule
type NoncurrentVersionExpiration struct {
	XMLName        xml.Name `xml:"NoncurrentVersionExpiration" json:"-"`
	NoncurrentDays int      `xml:"NoncurrentDays"`
}

// AbortIncompleteMultipartUpload defines abortIncomplete expiration property in LifecycleRule
type AbortIncompleteMultipartUpload struct {
	XMLName             xml.Name `xml:"AbortIncompleteMultipartUpload" json:"-"`
	DaysAfterInitiation int      `xml:"DaysAfterInitiation"`
}

// LifecycleRule defines lifecycle rule
type LifecycleRule struct {
	ID                             string                         `xml:"ID,omitempty"`
	Prefix                         string                         `xml:"Prefix"`
	Status                         RuleStatusType                 `xml:"Status"`
	Transitions                    []Transition                   `xml:"Transition,omitempty"`
	Expiration                     Expiration                     `xml:"Expiration,omitempty"`
	NoncurrentVersionTransitions   []NoncurrentVersionTransition  `xml:"NoncurrentVersionTransition,omitempty"`
	NoncurrentVersionExpiration    NoncurrentVersionExpiration    `xml:"NoncurrentVersionExpiration,omitempty"`
	AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty"`
	Filter                         LifecycleFilter                `xml:"Filter,omitempty"`
}

// LifecycleFilter defines the And-combined prefix/tag filter of a lifecycle rule.
type LifecycleFilter struct {
	XMLName xml.Name `xml:"Filter" json:"-"`
	Prefix  string   `xml:"And>Prefix,omitempty"`
	Tags    []Tag    `xml:"And>Tag,omitempty"`
}

// BucketEncryptionConfiguration defines the bucket encryption configuration
type BucketEncryptionConfiguration struct {
	XMLName        xml.Name `xml:"ServerSideEncryptionConfiguration"`
	SSEAlgorithm   string   `xml:"Rule>ApplyServerSideEncryptionByDefault>SSEAlgorithm"`
	KMSMasterKeyID string   `xml:"Rule>ApplyServerSideEncryptionByDefault>KMSMasterKeyID,omitempty"`
	ProjectID      string   `xml:"Rule>ApplyServerSideEncryptionByDefault>ProjectID,omitempty"`
}

// Tag defines tag property in BucketTagging
type Tag struct {
	XMLName xml.Name `xml:"Tag"`
	Key     string   `xml:"Key"`
	Value   string   `xml:"Value"`
}

// BucketTagging defines the bucket tag configuration
type BucketTagging struct {
	XMLName xml.Name `xml:"Tagging"`
	Tags    []Tag    `xml:"TagSet>Tag"`
}

// FilterRule defines filter rule in TopicConfiguration
type FilterRule struct {
	XMLName xml.Name `xml:"FilterRule"`
	Name    string   `xml:"Name,omitempty"`
	Value   string   `xml:"Value,omitempty"`
}

// TopicConfiguration defines the topic configuration
type TopicConfiguration struct {
	XMLName     xml.Name     `xml:"TopicConfiguration"`
	ID          string       `xml:"Id,omitempty"`
	Topic       string       `xml:"Topic"`
	Events      []EventType  `xml:"Event"`
	FilterRules []FilterRule `xml:"Filter>Object>FilterRule"`
}

// BucketNotification defines the bucket notification configuration
type BucketNotification struct {
	XMLName             xml.Name             `xml:"NotificationConfiguration"`
	TopicConfigurations []TopicConfiguration `xml:"TopicConfiguration"`
}

// topicConfigurationS3 is the S3-protocol variant of TopicConfiguration:
// events are plain strings and filter rules nest under Filter>S3Key.
type topicConfigurationS3 struct {
	XMLName     xml.Name     `xml:"TopicConfiguration"`
	ID          string       `xml:"Id,omitempty"`
	Topic       string       `xml:"Topic"`
	Events      []string     `xml:"Event"`
	FilterRules []FilterRule `xml:"Filter>S3Key>FilterRule"`
}

// bucketNotificationS3 is the S3-protocol variant of BucketNotification.
type bucketNotificationS3 struct {
	XMLName             xml.Name               `xml:"NotificationConfiguration"`
	TopicConfigurations []topicConfigurationS3 `xml:"TopicConfiguration"`
}

// ObjectToDelete defines the object property in DeleteObjectsInput
type ObjectToDelete struct {
	XMLName   xml.Name `xml:"Object"`
	Key       string   `xml:"Key"`
	VersionId string   `xml:"VersionId,omitempty"`
}

// Deleted defines the deleted property in DeleteObjectsOutput
type Deleted struct {
	XMLName               xml.Name `xml:"Deleted"`
	Key                   string   `xml:"Key"`
	VersionId             string   `xml:"VersionId"`
	DeleteMarker          bool     `xml:"DeleteMarker"`
	DeleteMarkerVersionId string   `xml:"DeleteMarkerVersionId"`
}

// Part defines the part properties
type Part struct {
	XMLName      xml.Name  `xml:"Part"`
	PartNumber   int       `xml:"PartNumber"`
	ETag         string    `xml:"ETag"`
	LastModified time.Time `xml:"LastModified,omitempty"`
	Size         int64     `xml:"Size,omitempty"`
}

// BucketPayer defines the request payment configuration
type BucketPayer struct {
	XMLName xml.Name  `xml:"RequestPaymentConfiguration"`
	Payer   PayerType `xml:"Payer"`
}

// PublicAccessBlockConfiguration defines the bucket public-access-block settings.
type PublicAccessBlockConfiguration struct {
	XMLName               xml.Name `xml:"PublicAccessBlockConfiguration"`
	BlockPublicAcls       bool     `xml:"BlockPublicAcls"`
	IgnorePublicAcls      bool     `xml:"IgnorePublicAcls"`
	BlockPublicPolicy     bool     `xml:"BlockPublicPolicy"`
	RestrictPublicBuckets bool     `xml:"RestrictPublicBuckets"`
}

// PolicyPublicStatus reports whether the bucket policy is public.
type PolicyPublicStatus struct {
	XMLName  xml.Name `xml:"PolicyStatus"`
	IsPublic bool     `xml:"IsPublic"`
}

// BucketPublicStatus reports whether the bucket is public.
type BucketPublicStatus struct {
	XMLName  xml.Name `xml:"BucketStatus"`
	IsPublic bool     `xml:"IsPublic"`
}

// HttpHeader defines the standard metadata
type HttpHeader struct {
	CacheControl       string
	ContentDisposition string
	ContentEncoding    string
	ContentLanguage    string
	ContentType        string
	HttpExpires        string
}

+ 437
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_bucket.go View File

@@ -0,0 +1,437 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"encoding/xml"
)

// DeleteBucketCustomDomainInput is the input parameter of DeleteBucketCustomDomain function
type DeleteBucketCustomDomainInput struct {
Bucket string
CustomDomain string
}

// GetBucketCustomDomainOutput is the result of GetBucketCustomDomain function
type GetBucketCustomDomainOutput struct {
BaseModel
Domains []Domain `xml:"Domains"`
}

type CustomDomainConfiguration struct {
Name string `xml:"Name"`
CertificateId string `xml:"CertificateId,omitempty"`
Certificate string `xml:"Certificate"`
CertificateChain string `xml:"CertificateChain,omitempty"`
PrivateKey string `xml:"PrivateKey"`
}

type SetBucketCustomDomainInput struct {
Bucket string
CustomDomain string
CustomDomainConfiguration *CustomDomainConfiguration `json:"customDomainConfiguration"` //optional
}

// GetBucketMirrorBackToSourceOutput is the result of GetBucketMirrorBackToSource function
type GetBucketMirrorBackToSourceOutput struct {
BaseModel
Rules string `json:"body"`
}

type SetBucketMirrorBackToSourceInput struct {
Bucket string
Rules string `json:"body"`
}

// Domain defines one custom-domain entry returned by GetBucketCustomDomain:
// the bound domain name, when it was created, and the attached certificate.
// (The original comment "Content defines the object content properties" was a
// copy-paste from another type.)
type Domain struct {
DomainName string `xml:"DomainName"`
CreateTime string `xml:"CreateTime"`
CertificateId string `xml:"CertificateId"`
}

// ListBucketsInput is the input parameter of ListBuckets function
type ListBucketsInput struct {
QueryLocation bool
BucketType BucketType
MaxKeys int
Marker string
}

// ListBucketsOutput is the result of ListBuckets function
type ListBucketsOutput struct {
BaseModel
XMLName xml.Name `xml:"ListAllMyBucketsResult"`
Owner Owner `xml:"Owner"`
Buckets []Bucket `xml:"Buckets>Bucket"`
IsTruncated bool `xml:"IsTruncated"`
Marker string `xml:"Marker"`
NextMarker string `xml:"NextMarker"`
MaxKeys int `xml:"MaxKeys"`
}

// CreateBucketInput is the input parameter of CreateBucket function
type CreateBucketInput struct {
BucketLocation
Bucket string `xml:"-"`
ACL AclType `xml:"-"`
StorageClass StorageClassType `xml:"-"`
GrantReadId string `xml:"-"`
GrantWriteId string `xml:"-"`
GrantReadAcpId string `xml:"-"`
GrantWriteAcpId string `xml:"-"`
GrantFullControlId string `xml:"-"`
GrantReadDeliveredId string `xml:"-"`
GrantFullControlDeliveredId string `xml:"-"`
Epid string `xml:"-"`
AvailableZone string `xml:"-"`
IsFSFileInterface bool `xml:"-"`
BucketRedundancy BucketRedundancyType `xml:"-"`
IsFusionAllowUpgrade bool `xml:"-"`
IsRedundancyAllowALT bool `xml:"-"`
}

// SetBucketStoragePolicyInput is the input parameter of SetBucketStoragePolicy function
type SetBucketStoragePolicyInput struct {
Bucket string `xml:"-"`
BucketStoragePolicy
}

type getBucketStoragePolicyOutputS3 struct {
BaseModel
BucketStoragePolicy
}

// GetBucketStoragePolicyOutput is the result of GetBucketStoragePolicy function
type GetBucketStoragePolicyOutput struct {
BaseModel
StorageClass string
}

type getBucketStoragePolicyOutputObs struct {
BaseModel
bucketStoragePolicyObs
}

// SetBucketQuotaInput is the input parameter of SetBucketQuota function
type SetBucketQuotaInput struct {
Bucket string `xml:"-"`
BucketQuota
}

// GetBucketQuotaOutput is the result of GetBucketQuota function
type GetBucketQuotaOutput struct {
BaseModel
BucketQuota
}

// GetBucketStorageInfoOutput is the result of GetBucketStorageInfo function
type GetBucketStorageInfoOutput struct {
BaseModel
XMLName xml.Name `xml:"GetBucketStorageInfoResult"`
Size int64 `xml:"Size"`
ObjectNumber int `xml:"ObjectNumber"`
}

type getBucketLocationOutputS3 struct {
BaseModel
BucketLocation
}
type getBucketLocationOutputObs struct {
BaseModel
bucketLocationObs
}

// GetBucketLocationOutput is the result of GetBucketLocation function
type GetBucketLocationOutput struct {
BaseModel
Location string `xml:"-"`
}

// GetBucketAclOutput is the result of GetBucketAcl function
type GetBucketAclOutput struct {
BaseModel
AccessControlPolicy
}

type getBucketACLOutputObs struct {
BaseModel
accessControlPolicyObs
}

// SetBucketAclInput is the input parameter of SetBucketAcl function
type SetBucketAclInput struct {
Bucket string `xml:"-"`
ACL AclType `xml:"-"`
AccessControlPolicy
}

// SetBucketPolicyInput is the input parameter of SetBucketPolicy function
type SetBucketPolicyInput struct {
Bucket string
Policy string
}

// GetBucketPolicyOutput is the result of GetBucketPolicy function
type GetBucketPolicyOutput struct {
BaseModel
Policy string `json:"body"`
}

// SetBucketCorsInput is the input parameter of SetBucketCors function
type SetBucketCorsInput struct {
Bucket string `xml:"-"`
BucketCors
EnableSha256 bool `xml:"-"`
}

// GetBucketCorsOutput is the result of GetBucketCors function
type GetBucketCorsOutput struct {
BaseModel
BucketCors
}

// SetBucketVersioningInput is the input parameter of SetBucketVersioning function
type SetBucketVersioningInput struct {
Bucket string `xml:"-"`
BucketVersioningConfiguration
}

// GetBucketVersioningOutput is the result of GetBucketVersioning function
type GetBucketVersioningOutput struct {
BaseModel
BucketVersioningConfiguration
}

// SetBucketWebsiteConfigurationInput is the input parameter of SetBucketWebsiteConfiguration function
type SetBucketWebsiteConfigurationInput struct {
Bucket string `xml:"-"`
BucketWebsiteConfiguration
}

// GetBucketWebsiteConfigurationOutput is the result of GetBucketWebsiteConfiguration function
type GetBucketWebsiteConfigurationOutput struct {
BaseModel
BucketWebsiteConfiguration
}

// GetBucketMetadataInput is the input parameter of GetBucketMetadata function
type GetBucketMetadataInput struct {
Bucket string
Origin string
RequestHeader string
}

// GetBucketMetadataOutput is the result of GetBucketMetadata function
type GetBucketMetadataOutput struct {
BaseModel
StorageClass StorageClassType
Location string
Version string
AllowOrigin string
AllowMethod string
AllowHeader string
MaxAgeSeconds int
ExposeHeader string
Epid string
AZRedundancy AvailableZoneType
FSStatus FSStatusType
BucketRedundancy BucketRedundancyType
}

// SetBucketLoggingConfigurationInput is the input parameter of SetBucketLoggingConfiguration function
type SetBucketLoggingConfigurationInput struct {
Bucket string `xml:"-"`
BucketLoggingStatus
}

// GetBucketLoggingConfigurationOutput is the result of GetBucketLoggingConfiguration function
type GetBucketLoggingConfigurationOutput struct {
BaseModel
BucketLoggingStatus
}

// BucketLifecycleConfiguration defines the bucket lifecycle configuration
type BucketLifecycleConfiguration struct {
XMLName xml.Name `xml:"LifecycleConfiguration" json:"-"`
LifecycleRules []LifecycleRule `xml:"Rule"`
}

// SetBucketLifecycleConfigurationInput is the input parameter of SetBucketLifecycleConfiguration function
type SetBucketLifecycleConfigurationInput struct {
Bucket string `xml:"-"`
BucketLifecycleConfiguration
EnableSha256 bool `xml:"-"`
}

// GetBucketLifecycleConfigurationOutput is the result of GetBucketLifecycleConfiguration function
type GetBucketLifecycleConfigurationOutput struct {
BaseModel
BucketLifecycleConfiguration
}

// SetBucketEncryptionInput is the input parameter of SetBucketEncryption function
type SetBucketEncryptionInput struct {
Bucket string `xml:"-"`
BucketEncryptionConfiguration
}

// GetBucketEncryptionOutput is the result of GetBucketEncryption function
type GetBucketEncryptionOutput struct {
BaseModel
BucketEncryptionConfiguration
}

// SetBucketTaggingInput is the input parameter of SetBucketTagging function
type SetBucketTaggingInput struct {
Bucket string `xml:"-"`
BucketTagging
EnableSha256 bool `xml:"-"`
}

// GetBucketTaggingOutput is the result of GetBucketTagging function
type GetBucketTaggingOutput struct {
BaseModel
BucketTagging
}

// SetBucketNotificationInput is the input parameter of SetBucketNotification function
type SetBucketNotificationInput struct {
Bucket string `xml:"-"`
BucketNotification
}

type getBucketNotificationOutputS3 struct {
BaseModel
bucketNotificationS3
}

// GetBucketNotificationOutput is the result of GetBucketNotification function
type GetBucketNotificationOutput struct {
BaseModel
BucketNotification
}

// SetBucketFetchPolicyInput is the input parameter of SetBucketFetchPolicy function
type SetBucketFetchPolicyInput struct {
Bucket string
Status FetchPolicyStatusType `json:"status"`
Agency string `json:"agency"`
}

// GetBucketFetchPolicyInput is the input parameter of GetBucketFetchPolicy function
type GetBucketFetchPolicyInput struct {
Bucket string
}

// GetBucketFetchPolicyOutput is the result of GetBucketFetchPolicy function
type GetBucketFetchPolicyOutput struct {
BaseModel
FetchResponse `json:"fetch"`
}

// DeleteBucketFetchPolicyInput is the input parameter of DeleteBucketFetchPolicy function
type DeleteBucketFetchPolicyInput struct {
Bucket string
}

// SetBucketFetchJobInput is the input parameter of SetBucketFetchJob function
type SetBucketFetchJobInput struct {
Bucket string `json:"bucket"`
URL string `json:"url"`
Host string `json:"host,omitempty"`
Key string `json:"key,omitempty"`
Md5 string `json:"md5,omitempty"`
CallBackURL string `json:"callbackurl,omitempty"`
CallBackBody string `json:"callbackbody,omitempty"`
CallBackBodyType string `json:"callbackbodytype,omitempty"`
CallBackHost string `json:"callbackhost,omitempty"`
FileType string `json:"file_type,omitempty"`
IgnoreSameKey bool `json:"ignore_same_key,omitempty"`
ObjectHeaders map[string]string `json:"objectheaders,omitempty"`
Etag string `json:"etag,omitempty"`
TrustName string `json:"trustname,omitempty"`
}

// SetBucketFetchJobOutput is the result of SetBucketFetchJob function
type SetBucketFetchJobOutput struct {
BaseModel
SetBucketFetchJobResponse
}

// GetBucketFetchJobInput is the input parameter of GetBucketFetchJob function
type GetBucketFetchJobInput struct {
Bucket string
JobID string
}

// GetBucketFetchJobOutput is the result of GetBucketFetchJob function
type GetBucketFetchJobOutput struct {
BaseModel
GetBucketFetchJobResponse
}

type GetBucketFSStatusInput struct {
GetBucketMetadataInput
}

type GetBucketFSStatusOutput struct {
GetBucketMetadataOutput
FSStatus FSStatusType
}

type SetDirAccesslabelInput struct {
BaseDirAccesslabelInput
Accesslabel []string
}

type GetDirAccesslabelInput struct {
BaseDirAccesslabelInput
}

type GetDirAccesslabelOutput struct {
BaseModel
Accesslabel []string
}

type DeleteDirAccesslabelInput struct {
BaseDirAccesslabelInput
Accesslabel []string
}

type BaseDirAccesslabelInput struct {
Bucket string
Key string
Accesslabel []string
}

// PutBucketPublicAccessBlockInput is the input parameter of PutBucketPublicAccessBlock function
type PutBucketPublicAccessBlockInput struct {
Bucket string `xml:"-"`
PublicAccessBlockConfiguration
}

type GetBucketPublicAccessBlockOutput struct {
BaseModel
PublicAccessBlockConfiguration
}

type GetBucketPublicStatusOutput struct {
BaseModel
BucketPublicStatus
}

type GetBucketPolicyPublicStatusOutput struct {
BaseModel
PolicyPublicStatus
}

+ 33
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_header.go View File

@@ -0,0 +1,33 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

// ISseHeader defines the sse encryption header
type ISseHeader interface {
GetEncryption() string
GetKey() string
}

// SseKmsHeader defines the SseKms header
type SseKmsHeader struct {
Encryption string
Key string
isObs bool
}

// SseCHeader defines the SseC header
type SseCHeader struct {
Encryption string
Key string
KeyMD5 string
}

+ 433
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_object.go View File

@@ -0,0 +1,433 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"encoding/xml"
"io"
"time"
)

// ListObjsInput defines parameters for listing objects
type ListObjsInput struct {
Prefix string
MaxKeys int
Delimiter string
Origin string
RequestHeader string
EncodingType string
}

// ListObjectsInput is the input parameter of ListObjects function
type ListObjectsInput struct {
ListObjsInput
Bucket string
Marker string
}

type ListPosixObjectsInput struct {
ListObjectsInput
}

// ListObjectsOutput is the result of ListObjects function
type ListObjectsOutput struct {
BaseModel
XMLName xml.Name `xml:"ListBucketResult"`
Delimiter string `xml:"Delimiter"`
IsTruncated bool `xml:"IsTruncated"`
Marker string `xml:"Marker"`
NextMarker string `xml:"NextMarker"`
MaxKeys int `xml:"MaxKeys"`
Name string `xml:"Name"`
Prefix string `xml:"Prefix"`
Contents []Content `xml:"Contents"`
CommonPrefixes []string `xml:"CommonPrefixes>Prefix"`
Location string `xml:"-"`
EncodingType string `xml:"EncodingType,omitempty"`
}

// ListPosixObjectsOutput is the result of ListPosixObjects function.
// NOTE: CommonPrefixes intentionally shadows the embedded
// ListObjectsOutput.CommonPrefixes ([]string) so that each prefix carries
// the extra POSIX attributes (MTime, Mode, InodeNo) instead of a bare string.
type ListPosixObjectsOutput struct {
ListObjectsOutput
CommonPrefixes []CommonPrefix `xml:"CommonPrefixes"`
}

// CommonPrefix defines one grouped prefix entry in a POSIX-mode listing,
// extending the plain S3 prefix string with file-system attributes.
type CommonPrefix struct {
XMLName xml.Name `xml:"CommonPrefixes"`
Prefix string `xml:"Prefix"`
MTime string `xml:"MTime"`
Mode string `xml:"Mode"`
InodeNo string `xml:"InodeNo"`
LastModified time.Time `xml:"LastModified"`
}

// ListVersionsInput is the input parameter of ListVersions function
type ListVersionsInput struct {
ListObjsInput
Bucket string
KeyMarker string
VersionIdMarker string
}

// ListVersionsOutput is the result of ListVersions function
type ListVersionsOutput struct {
BaseModel
XMLName xml.Name `xml:"ListVersionsResult"`
Delimiter string `xml:"Delimiter"`
IsTruncated bool `xml:"IsTruncated"`
KeyMarker string `xml:"KeyMarker"`
NextKeyMarker string `xml:"NextKeyMarker"`
VersionIdMarker string `xml:"VersionIdMarker"`
NextVersionIdMarker string `xml:"NextVersionIdMarker"`
MaxKeys int `xml:"MaxKeys"`
Name string `xml:"Name"`
Prefix string `xml:"Prefix"`
Versions []Version `xml:"Version"`
DeleteMarkers []DeleteMarker `xml:"DeleteMarker"`
CommonPrefixes []string `xml:"CommonPrefixes>Prefix"`
Location string `xml:"-"`
EncodingType string `xml:"EncodingType,omitempty"`
}

// DeleteObjectInput is the input parameter of DeleteObject function
type DeleteObjectInput struct {
Bucket string
Key string
VersionId string
}

// DeleteObjectOutput is the result of DeleteObject function
type DeleteObjectOutput struct {
BaseModel
VersionId string
DeleteMarker bool
}

// DeleteObjectsInput is the input parameter of DeleteObjects function
type DeleteObjectsInput struct {
Bucket string `xml:"-"`
XMLName xml.Name `xml:"Delete"`
Quiet bool `xml:"Quiet,omitempty"`
Objects []ObjectToDelete `xml:"Object"`
EncodingType string `xml:"EncodingType"`
}

// DeleteObjectsOutput is the result of DeleteObjects function
type DeleteObjectsOutput struct {
BaseModel
XMLName xml.Name `xml:"DeleteResult"`
Deleteds []Deleted `xml:"Deleted"`
Errors []Error `xml:"Error"`
EncodingType string `xml:"EncodingType,omitempty"`
}

// SetObjectAclInput is the input parameter of SetObjectAcl function
type SetObjectAclInput struct {
Bucket string `xml:"-"`
Key string `xml:"-"`
VersionId string `xml:"-"`
ACL AclType `xml:"-"`
AccessControlPolicy
}

// GetObjectAclInput is the input parameter of GetObjectAcl function
type GetObjectAclInput struct {
Bucket string
Key string
VersionId string
}

// GetObjectAclOutput is the result of GetObjectAcl function
type GetObjectAclOutput struct {
BaseModel
VersionId string
AccessControlPolicy
}

// RestoreObjectInput is the input parameter of RestoreObject function
type RestoreObjectInput struct {
Bucket string `xml:"-"`
Key string `xml:"-"`
VersionId string `xml:"-"`
XMLName xml.Name `xml:"RestoreRequest"`
Days int `xml:"Days"`
Tier RestoreTierType `xml:"GlacierJobParameters>Tier,omitempty"`
}

// GetObjectMetadataInput is the input parameter of GetObjectMetadata function
type GetObjectMetadataInput struct {
Bucket string
Key string
VersionId string
Origin string
RequestHeader string
SseHeader ISseHeader
}

// GetObjectMetadataOutput is the result of GetObjectMetadata function
type GetObjectMetadataOutput struct {
BaseModel
HttpHeader
VersionId string
WebsiteRedirectLocation string
Expiration string
Restore string
ObjectType string
NextAppendPosition string
StorageClass StorageClassType
ContentLength int64
ETag string
AllowOrigin string
AllowHeader string
AllowMethod string
ExposeHeader string
MaxAgeSeconds int
LastModified time.Time
SseHeader ISseHeader
Metadata map[string]string
}

type GetAttributeInput struct {
GetObjectMetadataInput
RequestPayer string
}

type GetAttributeOutput struct {
GetObjectMetadataOutput
Mode int
}

// GetObjectInput is the input parameter of GetObject function
type GetObjectInput struct {
GetObjectMetadataInput
IfMatch string
IfNoneMatch string
AcceptEncoding string
IfUnmodifiedSince time.Time
IfModifiedSince time.Time
RangeStart int64
RangeEnd int64
Range string
ImageProcess string
ResponseCacheControl string
ResponseContentDisposition string
ResponseContentEncoding string
ResponseContentLanguage string
ResponseContentType string
ResponseExpires string
}

// GetObjectOutput is the result of GetObject function
type GetObjectOutput struct {
GetObjectMetadataOutput
DeleteMarker bool
Expires string
Body io.ReadCloser
}

// ObjectOperationInput defines the object operation properties
type ObjectOperationInput struct {
Bucket string
Key string
ACL AclType
GrantReadId string
GrantReadAcpId string
GrantWriteAcpId string
GrantFullControlId string
StorageClass StorageClassType
WebsiteRedirectLocation string
Expires int64
SseHeader ISseHeader
Metadata map[string]string
}

// PutObjectBasicInput defines the basic object operation properties
type PutObjectBasicInput struct {
ObjectOperationInput
HttpHeader
ContentMD5 string
ContentSHA256 string
ContentLength int64
}

// PutObjectInput is the input parameter of PutObject function
type PutObjectInput struct {
PutObjectBasicInput
Body io.Reader
}

type NewFolderInput struct {
ObjectOperationInput
RequestPayer string
}

type NewFolderOutput struct {
PutObjectOutput
}

// PutFileInput is the input parameter of PutFile function
type PutFileInput struct {
PutObjectBasicInput
SourceFile string
}

// PutObjectOutput is the result of PutObject function
type PutObjectOutput struct {
BaseModel
VersionId string
SseHeader ISseHeader
StorageClass StorageClassType
ETag string
ObjectUrl string
CallbackBody
}

// CopyObjectInput is the input parameter of CopyObject function
type CopyObjectInput struct {
ObjectOperationInput
CopySourceBucket string
CopySourceKey string
CopySourceVersionId string
CopySourceIfMatch string
CopySourceIfNoneMatch string
CopySourceIfUnmodifiedSince time.Time
CopySourceIfModifiedSince time.Time
SourceSseHeader ISseHeader
Expires string
MetadataDirective MetadataDirectiveType
SuccessActionRedirect string
HttpHeader
}

// CopyObjectOutput is the result of CopyObject function
type CopyObjectOutput struct {
BaseModel
CopySourceVersionId string `xml:"-"`
VersionId string `xml:"-"`
SseHeader ISseHeader `xml:"-"`
XMLName xml.Name `xml:"CopyObjectResult"`
LastModified time.Time `xml:"LastModified"`
ETag string `xml:"ETag"`
}

// UploadFileInput is the input parameter of UploadFile function
type UploadFileInput struct {
ObjectOperationInput
UploadFile string
PartSize int64
TaskNum int
EnableCheckpoint bool
CheckpointFile string
EncodingType string
HttpHeader
}

// DownloadFileInput is the input parameter of DownloadFile function
type DownloadFileInput struct {
GetObjectMetadataInput
IfMatch string
IfNoneMatch string
IfModifiedSince time.Time
IfUnmodifiedSince time.Time
DownloadFile string
PartSize int64
TaskNum int
EnableCheckpoint bool
CheckpointFile string
}

type AppendObjectInput struct {
PutObjectBasicInput
Body io.Reader
Position int64
}

type AppendObjectOutput struct {
BaseModel
VersionId string
SseHeader ISseHeader
NextAppendPosition int64
ETag string
}

type ModifyObjectInput struct {
Bucket string
Key string
Position int64
Body io.Reader
ContentLength int64
}

type ModifyObjectOutput struct {
BaseModel
ETag string
}

// HeadObjectInput is the input parameter of HeadObject function
type HeadObjectInput struct {
Bucket string
Key string
VersionId string
}

type RenameFileInput struct {
Bucket string
Key string
NewObjectKey string
RequestPayer string
}

type RenameFileOutput struct {
BaseModel
}

type RenameFolderInput struct {
Bucket string
Key string
NewObjectKey string
RequestPayer string
}

type RenameFolderOutput struct {
BaseModel
}

// SetObjectMetadataInput is the input parameter of SetObjectMetadata function
type SetObjectMetadataInput struct {
Bucket string
Key string
VersionId string
MetadataDirective MetadataDirectiveType
Expires string
WebsiteRedirectLocation string
StorageClass StorageClassType
Metadata map[string]string
HttpHeader
}

// SetObjectMetadataOutput is the result of SetObjectMetadata function
type SetObjectMetadataOutput struct {
BaseModel
MetadataDirective MetadataDirectiveType
HttpHeader
Expires string
WebsiteRedirectLocation string
StorageClass StorageClassType
Metadata map[string]string
}

type CallbackInput struct {
CallbackUrl string `json:"callbackUrl"`
CallbackHost string `json:"callbackHost,omitempty"`
CallbackBody string `json:"callbackBody"`
CallbackBodyType string `json:"callbackBodyType,omitempty"`
}

+ 73
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_other.go View File

@@ -0,0 +1,73 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"net/http"
)

// CreateSignedUrlInput is the input parameter of CreateSignedUrl function
type CreateSignedUrlInput struct {
Method HttpMethodType
Bucket string
Key string
Policy string
SubResource SubResourceType
Expires int
Headers map[string]string
QueryParams map[string]string
}

// CreateSignedUrlOutput is the result of CreateSignedUrl function
type CreateSignedUrlOutput struct {
SignedUrl string
ActualSignedRequestHeaders http.Header
}

// ConditionRange the specifying ranges in the conditions
type ConditionRange struct {
RangeName string
Lower int64
Upper int64
}

// CreateBrowserBasedSignatureInput is the input parameter of CreateBrowserBasedSignature function.
type CreateBrowserBasedSignatureInput struct {
Bucket string
Key string
Expires int
FormParams map[string]string
RangeParams []ConditionRange
}

// CreateBrowserBasedSignatureOutput is the result of CreateBrowserBasedSignature function.
type CreateBrowserBasedSignatureOutput struct {
OriginPolicy string
Policy string
Algorithm string
Credential string
Date string
Signature string
}

// SetBucketRequestPaymentInput is the input parameter of SetBucketRequestPayment function
type SetBucketRequestPaymentInput struct {
Bucket string `xml:"-"`
BucketPayer
}

// GetBucketRequestPaymentOutput is the result of GetBucketRequestPayment function
type GetBucketRequestPaymentOutput struct {
BaseModel
BucketPayer
}

+ 174
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_part.go View File

@@ -0,0 +1,174 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"encoding/xml"
"io"
"time"
)

// ListMultipartUploadsInput is the input parameter of ListMultipartUploads function
type ListMultipartUploadsInput struct {
Bucket string
Prefix string
MaxUploads int
Delimiter string
KeyMarker string
UploadIdMarker string
EncodingType string
}

// ListMultipartUploadsOutput is the result of ListMultipartUploads function
type ListMultipartUploadsOutput struct {
BaseModel
XMLName xml.Name `xml:"ListMultipartUploadsResult"`
Bucket string `xml:"Bucket"`
KeyMarker string `xml:"KeyMarker"`
NextKeyMarker string `xml:"NextKeyMarker"`
UploadIdMarker string `xml:"UploadIdMarker"`
NextUploadIdMarker string `xml:"NextUploadIdMarker"`
Delimiter string `xml:"Delimiter"`
IsTruncated bool `xml:"IsTruncated"`
MaxUploads int `xml:"MaxUploads"`
Prefix string `xml:"Prefix"`
Uploads []Upload `xml:"Upload"`
CommonPrefixes []string `xml:"CommonPrefixes>Prefix"`
EncodingType string `xml:"EncodingType,omitempty"`
}

// AbortMultipartUploadInput is the input parameter of AbortMultipartUpload function
type AbortMultipartUploadInput struct {
Bucket string
Key string
UploadId string
}

// InitiateMultipartUploadInput is the input parameter of InitiateMultipartUpload function
type InitiateMultipartUploadInput struct {
ObjectOperationInput
HttpHeader
EncodingType string
}

// InitiateMultipartUploadOutput is the result of InitiateMultipartUpload function
type InitiateMultipartUploadOutput struct {
BaseModel
XMLName xml.Name `xml:"InitiateMultipartUploadResult"`
Bucket string `xml:"Bucket"`
Key string `xml:"Key"`
UploadId string `xml:"UploadId"`
SseHeader ISseHeader
EncodingType string `xml:"EncodingType,omitempty"`
}

// UploadPartInput is the input parameter of UploadPart function
type UploadPartInput struct {
Bucket string
Key string
PartNumber int
UploadId string
ContentMD5 string
ContentSHA256 string
SseHeader ISseHeader
Body io.Reader
SourceFile string
Offset int64
PartSize int64
}

// UploadPartOutput is the result of UploadPart function
type UploadPartOutput struct {
BaseModel
PartNumber int
ETag string
SseHeader ISseHeader
}

// CompleteMultipartUploadInput is the input parameter of CompleteMultipartUpload function
type CompleteMultipartUploadInput struct {
Bucket string `xml:"-"`
Key string `xml:"-"`
UploadId string `xml:"-"`
XMLName xml.Name `xml:"CompleteMultipartUpload"`
Parts []Part `xml:"Part"`
EncodingType string `xml:"-"`
}

// CompleteMultipartUploadOutput is the result of CompleteMultipartUpload function
type CompleteMultipartUploadOutput struct {
BaseModel
VersionId string `xml:"-"`
SseHeader ISseHeader `xml:"-"`
XMLName xml.Name `xml:"CompleteMultipartUploadResult"`
Location string `xml:"Location"`
Bucket string `xml:"Bucket"`
Key string `xml:"Key"`
ETag string `xml:"ETag"`
EncodingType string `xml:"EncodingType,omitempty"`
CallbackBody
}

// ListPartsInput is the input parameter of ListParts function
type ListPartsInput struct {
Bucket string
Key string
UploadId string
MaxParts int
PartNumberMarker int
EncodingType string
}

// ListPartsOutput is the result of ListParts function
type ListPartsOutput struct {
BaseModel
XMLName xml.Name `xml:"ListPartsResult"`
Bucket string `xml:"Bucket"`
Key string `xml:"Key"`
UploadId string `xml:"UploadId"`
PartNumberMarker int `xml:"PartNumberMarker"`
NextPartNumberMarker int `xml:"NextPartNumberMarker"`
MaxParts int `xml:"MaxParts"`
IsTruncated bool `xml:"IsTruncated"`
StorageClass StorageClassType `xml:"StorageClass"`
Initiator Initiator `xml:"Initiator"`
Owner Owner `xml:"Owner"`
Parts []Part `xml:"Part"`
EncodingType string `xml:"EncodingType,omitempty"`
}

// CopyPartInput is the input parameter of CopyPart function
type CopyPartInput struct {
Bucket string
Key string
UploadId string
PartNumber int
CopySourceBucket string
CopySourceKey string
CopySourceVersionId string
CopySourceRangeStart int64
CopySourceRangeEnd int64
CopySourceRange string
SseHeader ISseHeader
SourceSseHeader ISseHeader
}

// CopyPartOutput is the result of CopyPart function
type CopyPartOutput struct {
BaseModel
XMLName xml.Name `xml:"CopyPartResult"`
PartNumber int `xml:"-"`
ETag string `xml:"ETag"`
LastModified time.Time `xml:"LastModified"`
SseHeader ISseHeader `xml:"-"`
}

+ 68
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/model_response.go View File

@@ -0,0 +1,68 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"encoding/xml"
)

// BaseModel defines the base response fields shared by every OBS API result.
// It is embedded in all *Output types.
type BaseModel struct {
StatusCode int `xml:"-"` // HTTP status code of the response; not part of the XML body
RequestId string `xml:"RequestId" json:"request_id"` // server-assigned request ID, useful for support tickets
ResponseHeaders map[string][]string `xml:"-"` // raw HTTP response headers; not part of the XML body
}

// Error defines the error property in DeleteObjectsOutput
type Error struct {
XMLName xml.Name `xml:"Error"`
Key string `xml:"Key"`
VersionId string `xml:"VersionId"`
Code string `xml:"Code"`
Message string `xml:"Message"`
}

// FetchResponse defines the response fetch policy configuration
type FetchResponse struct {
Status FetchPolicyStatusType `json:"status"`
Agency string `json:"agency"`
}

// SetBucketFetchJobResponse defines the response body of SetBucketFetchJob:
// the ID of the created asynchronous fetch job and its wait status.
type SetBucketFetchJobResponse struct {
ID string `json:"id"`
// NOTE(review): the capitalized tag "Wait" is inconsistent with the other
// lowercase JSON tags in this file — presumably it mirrors the server's
// actual payload key; confirm against the OBS fetch-job API before changing.
Wait int `json:"Wait"`
}

// GetBucketFetchJobResponse defines the response fetch job configuration
type GetBucketFetchJobResponse struct {
Err string `json:"err"`
Code string `json:"code"`
Status string `json:"status"`
Job JobResponse `json:"job"`
}

// JobResponse defines the response job configuration
type JobResponse struct {
Bucket string `json:"bucket"`
URL string `json:"url"`
Host string `json:"host"`
Key string `json:"key"`
Md5 string `json:"md5"`
CallBackURL string `json:"callbackurl"`
CallBackBody string `json:"callbackbody"`
CallBackBodyType string `json:"callbackbodytype"`
CallBackHost string `json:"callbackhost"`
FileType string `json:"file_type"`
IgnoreSameKey bool `json:"ignore_same_key"`
}

+ 542
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/pool.go View File

@@ -0,0 +1,542 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

//nolint:structcheck, unused
package obs

import (
"errors"
"fmt"
"runtime"
"sync"
"sync/atomic"
"time"
)

// Future defines interface with function: Get
type Future interface {
Get() interface{}
}

// FutureResult for task result
type FutureResult struct {
result interface{}
resultChan chan interface{}
lock sync.Mutex
}

type panicResult struct {
presult interface{}
}

// checkPanic returns the stored task result, re-raising the task's panic
// in the caller when the result is a captured panicResult.
func (f *FutureResult) checkPanic() interface{} {
	r, ok := f.result.(panicResult)
	if ok {
		panic(r.presult)
	}
	return f.result
}

// Get gets the task result, blocking until the task has finished. The
// first call receives the value from resultChan and caches it in f.result;
// subsequent calls return the cached value. If the task panicked, Get
// re-raises that panic in the calling goroutine (via checkPanic).
//
// NOTE(review): the fast-path read of f.resultChan happens before the lock
// is taken, while the slow path writes it under the lock — concurrent Get
// callers may race on that field; confirm whether Get is intended to be
// called from multiple goroutines.
func (f *FutureResult) Get() interface{} {
	if f.resultChan == nil {
		return f.checkPanic()
	}
	f.lock.Lock()
	defer f.lock.Unlock()
	// Re-check under the lock: another caller may have already consumed
	// the result while we were waiting for the mutex.
	if f.resultChan == nil {
		return f.checkPanic()
	}

	f.result = <-f.resultChan
	close(f.resultChan)
	f.resultChan = nil
	return f.checkPanic()
}

// Task defines interface with function: Run
type Task interface {
Run() interface{}
}

type funcWrapper struct {
f func() interface{}
}

// Run invokes the wrapped function and returns its result; a nil function
// yields nil.
func (fw *funcWrapper) Run() interface{} {
	if fw.f == nil {
		return nil
	}
	return fw.f()
}

type taskWrapper struct {
t Task
f *FutureResult
}

// Run delegates to the wrapped Task; a nil task yields nil.
func (tw *taskWrapper) Run() interface{} {
	if tw.t == nil {
		return nil
	}
	return tw.t.Run()
}

type signalTask struct {
id string
}

func (signalTask) Run() interface{} {
return nil
}

type worker struct {
name string
taskQueue chan Task
wg *sync.WaitGroup
pool *RoutinePool
}

// runTask executes t. Tasks submitted through Submit arrive wrapped in a
// taskWrapper: their return value is delivered on the wrapper's result
// channel, and a panic inside the task is captured and forwarded as a
// panicResult so FutureResult.Get can re-raise it in the waiting
// goroutine. Unwrapped tasks (the Execute path) are run directly and their
// result is discarded.
func runTask(t Task) {
	if tw, ok := t.(*taskWrapper); ok {
		defer func() {
			if r := recover(); r != nil {
				tw.f.resultChan <- panicResult{
					presult: r,
				}
			}
		}()
		ret := t.Run()
		tw.f.resultChan <- ret
	} else {
		t.Run()
	}
}

func (*worker) runTask(t Task) {
runTask(t)
}

// start launches the worker goroutine. The goroutine consumes tasks from
// the shared task queue until the queue is closed, updating the pool's
// "currently working" counter around each task. After each task it asks
// the pool whether this worker should exit (auto-tune shrinking).
func (w *worker) start() {
	go func() {
		defer func() {
			// w.wg may have been nil'ed by release(); see the note there.
			if w.wg != nil {
				w.wg.Done()
			}
		}()
		for {
			task, ok := <-w.taskQueue
			if !ok {
				// Queue closed: the pool is shutting down.
				break
			}
			w.pool.AddCurrentWorkingCnt(1)
			w.runTask(task)
			w.pool.AddCurrentWorkingCnt(-1)
			if w.pool.autoTuneWorker(w) {
				break
			}
		}
	}()
}

// release drops the worker's references so they can be garbage collected.
// NOTE(review): this is called (from ShutDown and autoTuneWorker) while
// the worker goroutine may still be running; the nil-check on w.wg in
// start's defer suggests the race is known, but it is not synchronized —
// confirm before relying on it.
func (w *worker) release() {
	w.taskQueue = nil
	w.wg = nil
	w.pool = nil
}

// Pool defines coroutine pool interface
type Pool interface {
ShutDown()
Submit(t Task) (Future, error)
SubmitFunc(f func() interface{}) (Future, error)
Execute(t Task)
ExecuteFunc(f func() interface{})
GetMaxWorkerCnt() int64
AddMaxWorkerCnt(value int64) int64
GetCurrentWorkingCnt() int64
AddCurrentWorkingCnt(value int64) int64
GetWorkerCnt() int64
AddWorkerCnt(value int64) int64
EnableAutoTune()
}

type basicPool struct {
maxWorkerCnt int64
workerCnt int64
currentWorkingCnt int64
isShutDown int32
}

// ErrTaskInvalid will be returned if the task is nil
var ErrTaskInvalid = errors.New("Task is nil")

func (pool *basicPool) GetCurrentWorkingCnt() int64 {
return atomic.LoadInt64(&pool.currentWorkingCnt)
}

func (pool *basicPool) AddCurrentWorkingCnt(value int64) int64 {
return atomic.AddInt64(&pool.currentWorkingCnt, value)
}

func (pool *basicPool) GetWorkerCnt() int64 {
return atomic.LoadInt64(&pool.workerCnt)
}

func (pool *basicPool) AddWorkerCnt(value int64) int64 {
return atomic.AddInt64(&pool.workerCnt, value)
}

func (pool *basicPool) GetMaxWorkerCnt() int64 {
return atomic.LoadInt64(&pool.maxWorkerCnt)
}

func (pool *basicPool) AddMaxWorkerCnt(value int64) int64 {
return atomic.AddInt64(&pool.maxWorkerCnt, value)
}

func (pool *basicPool) CompareAndSwapCurrentWorkingCnt(oldValue, newValue int64) bool {
return atomic.CompareAndSwapInt64(&pool.currentWorkingCnt, oldValue, newValue)
}

func (pool *basicPool) EnableAutoTune() {

}

// RoutinePool defines the coroutine pool struct
type RoutinePool struct {
basicPool
taskQueue chan Task
dispatchQueue chan Task
workers map[string]*worker
cacheCnt int
wg *sync.WaitGroup
lock *sync.Mutex
shutDownWg *sync.WaitGroup
autoTune int32
}

// ErrSubmitTimeout will be returned if submit task timeout when calling SubmitWithTimeout function
var ErrSubmitTimeout = errors.New("Submit task timeout")

// ErrPoolShutDown will be returned if RoutinePool is shutdown
var ErrPoolShutDown = errors.New("RoutinePool is shutdown")

// ErrTaskReject will be returned if submit task is rejected
var ErrTaskReject = errors.New("Submit task is rejected")

var closeQueue = signalTask{id: "closeQueue"}

// NewRoutinePool creates a RoutinePool instance. maxWorkerCnt caps the
// number of worker goroutines (defaulting to the CPU count when <= 0) and
// cacheCnt sets the task queue's buffer size (<= 0 yields an unbuffered
// queue). The dispatcher goroutine is started before returning.
func NewRoutinePool(maxWorkerCnt, cacheCnt int) Pool {
	if maxWorkerCnt <= 0 {
		maxWorkerCnt = runtime.NumCPU()
	}

	pool := &RoutinePool{
		cacheCnt:   cacheCnt,
		wg:         new(sync.WaitGroup),
		lock:       new(sync.Mutex),
		shutDownWg: new(sync.WaitGroup),
		autoTune:   0,
	}
	pool.isShutDown = 0
	pool.maxWorkerCnt += int64(maxWorkerCnt)
	if pool.cacheCnt <= 0 {
		pool.taskQueue = make(chan Task)
	} else {
		pool.taskQueue = make(chan Task, pool.cacheCnt)
	}
	pool.workers = make(map[string]*worker, pool.maxWorkerCnt)
	// dispatchQueue must not have length: an unbuffered channel is what
	// lets SubmitWithTimeout detect that the dispatcher is saturated.
	pool.dispatchQueue = make(chan Task)
	pool.dispatcher()

	return pool
}

// EnableAutoTune sets the autoTune enabled
func (pool *RoutinePool) EnableAutoTune() {
atomic.StoreInt32(&pool.autoTune, 1)
}

// checkStatus validates a task before it is queued: the task must be
// non-nil and the pool must not have been shut down.
func (pool *RoutinePool) checkStatus(t Task) error {
	switch {
	case t == nil:
		return ErrTaskInvalid
	case atomic.LoadInt32(&pool.isShutDown) == 1:
		return ErrPoolShutDown
	default:
		return nil
	}
}

// dispatcher starts the goroutine that moves tasks from dispatchQueue to
// taskQueue, growing the worker set on demand (up to maxWorkerCnt). The
// closeQueue sentinel closes taskQueue, which stops the workers; the
// dispatch loop itself ends when dispatchQueue is closed.
func (pool *RoutinePool) dispatcher() {
	pool.shutDownWg.Add(1)
	go func() {
		for {
			task, ok := <-pool.dispatchQueue
			if !ok {
				break
			}

			if task == closeQueue {
				close(pool.taskQueue)
				pool.shutDownWg.Done()
				continue
			}

			if pool.GetWorkerCnt() < pool.GetMaxWorkerCnt() {
				pool.addWorker()
			}

			pool.taskQueue <- task
		}
	}()
}

// AddMaxWorkerCnt adjusts maxWorkerCnt by value and returns the result —
// but only when auto-tuning is enabled; otherwise the current maximum is
// returned unchanged.
func (pool *RoutinePool) AddMaxWorkerCnt(value int64) int64 {
	if atomic.LoadInt32(&pool.autoTune) == 1 {
		return pool.basicPool.AddMaxWorkerCnt(value)
	}
	return pool.GetMaxWorkerCnt()
}

// addWorker creates and starts one worker bound to the pool's task queue,
// registering it in the workers map and the worker counters. The workers
// map is guarded by the lock only when auto-tuning is on; otherwise
// addWorker is only ever called from the single dispatcher goroutine.
func (pool *RoutinePool) addWorker() {
	if atomic.LoadInt32(&pool.autoTune) == 1 {
		pool.lock.Lock()
		defer pool.lock.Unlock()
	}
	w := &worker{}
	// Fixed typo in the generated name ("woker" -> "worker"); the name is
	// only used internally as the workers map key.
	w.name = fmt.Sprintf("worker-%d", len(pool.workers))
	w.taskQueue = pool.taskQueue
	w.wg = pool.wg
	pool.AddWorkerCnt(1)
	w.pool = pool
	pool.workers[w.name] = w
	pool.wg.Add(1)
	w.start()
}

// autoTuneWorker decides whether worker w should exit after finishing a
// task: when auto-tuning is on and the live worker count exceeds the
// current maximum, the worker is unregistered and told to stop.
// NOTE(review): w.wg.Done() here plus the Done() in start's defer can both
// run for the same worker; the nil-check after release() appears to be
// what prevents a double Done, but that is racy — confirm.
func (pool *RoutinePool) autoTuneWorker(w *worker) bool {
	if atomic.LoadInt32(&pool.autoTune) == 0 {
		return false
	}

	if w == nil {
		return false
	}

	workerCnt := pool.GetWorkerCnt()
	maxWorkerCnt := pool.GetMaxWorkerCnt()
	// The CAS guarantees only one goroutine wins the right to shrink by one.
	if workerCnt > maxWorkerCnt && atomic.CompareAndSwapInt64(&pool.workerCnt, workerCnt, workerCnt-1) {
		pool.lock.Lock()
		defer pool.lock.Unlock()
		delete(pool.workers, w.name)
		w.wg.Done()
		w.release()
		return true
	}

	return false
}

// ExecuteFunc wraps f in a Task and hands it to Execute.
func (pool *RoutinePool) ExecuteFunc(f func() interface{}) {
	pool.Execute(&funcWrapper{f: f})
}

// Execute pushes the specified task to the dispatchQueue; nil tasks are
// silently ignored.
func (pool *RoutinePool) Execute(t Task) {
	if t == nil {
		return
	}
	pool.dispatchQueue <- t
}

// SubmitFunc wraps f in a Task and submits it, returning a Future for the
// eventual result.
func (pool *RoutinePool) SubmitFunc(f func() interface{}) (Future, error) {
	return pool.Submit(&funcWrapper{f: f})
}

// Submit pushes the specified task to the dispatchQueue and returns a
// FutureResult the caller can use to wait for the task's result.
func (pool *RoutinePool) Submit(t Task) (Future, error) {
	if err := pool.checkStatus(t); err != nil {
		return nil, err
	}
	future := &FutureResult{resultChan: make(chan interface{}, 1)}
	pool.dispatchQueue <- &taskWrapper{t: t, f: future}
	return future, nil
}

// SubmitWithTimeout behaves like Submit but gives up after the specified
// timeout (in milliseconds), returning ErrSubmitTimeout when the task
// could not be handed to the dispatcher in time. A non-positive timeout
// falls back to Submit.
//
// Fixes: removed the redundant time.Duration double conversion, the
// per-call timer goroutine, and the duplicated select branch (both arms
// returned ErrSubmitTimeout regardless of the channel's ok flag).
func (pool *RoutinePool) SubmitWithTimeout(t Task, timeout int64) (Future, error) {
	if timeout <= 0 {
		return pool.Submit(t)
	}
	if err := pool.checkStatus(t); err != nil {
		return nil, err
	}

	f := &FutureResult{resultChan: make(chan interface{}, 1)}
	tw := &taskWrapper{
		t: t,
		f: f,
	}
	select {
	case pool.dispatchQueue <- tw:
		return f, nil
	case <-time.After(time.Millisecond * time.Duration(timeout)):
		return nil, ErrSubmitTimeout
	}
}

// beforeCloseDispatchQueue transitions the pool to the shut-down state
// (exactly once), sends the closeQueue sentinel so the dispatcher closes
// the task queue, and waits for all workers to finish.
func (pool *RoutinePool) beforeCloseDispatchQueue() {
	if !atomic.CompareAndSwapInt32(&pool.isShutDown, 0, 1) {
		return
	}
	pool.dispatchQueue <- closeQueue
	pool.wg.Wait()
}

// doCloseDispatchQueue closes the dispatch queue and waits for the
// dispatcher goroutine to acknowledge the shutdown.
func (pool *RoutinePool) doCloseDispatchQueue() {
	close(pool.dispatchQueue)
	pool.shutDownWg.Wait()
}

// ShutDown closes the RoutinePool instance: it stops accepting new tasks,
// drains both queues, then releases every worker and the pool's channels.
// NOTE(review): callers must stop submitting before calling ShutDown;
// submissions racing with shutdown can panic on the closed dispatch queue.
func (pool *RoutinePool) ShutDown() {
	pool.beforeCloseDispatchQueue()
	pool.doCloseDispatchQueue()
	for _, w := range pool.workers {
		w.release()
	}
	pool.workers = nil
	pool.taskQueue = nil
	pool.dispatchQueue = nil
}

// NoChanPool defines the coroutine pool struct
type NoChanPool struct {
basicPool
wg *sync.WaitGroup
tokens chan interface{}
}

// NewNochanPool creates a NoChanPool that bounds concurrency with a token
// channel instead of a task queue; maxWorkerCnt <= 0 defaults to the CPU
// count.
func NewNochanPool(maxWorkerCnt int) Pool {
	if maxWorkerCnt <= 0 {
		maxWorkerCnt = runtime.NumCPU()
	}

	p := &NoChanPool{
		wg:     new(sync.WaitGroup),
		tokens: make(chan interface{}, maxWorkerCnt),
	}
	p.isShutDown = 0
	p.AddMaxWorkerCnt(int64(maxWorkerCnt))

	// Fill the token bucket: each token allows one task to run at a time.
	for n := maxWorkerCnt; n > 0; n-- {
		p.tokens <- struct{}{}
	}

	return p
}

func (pool *NoChanPool) acquire() {
<-pool.tokens
}

// release returns a concurrency token to the pool, unblocking one pending
// acquire. Uses struct{}{} to match the token values created in
// NewNochanPool; the previous code pushed the int 1, which only worked
// because the channel element type is interface{}.
func (pool *NoChanPool) release() {
	pool.tokens <- struct{}{}
}

// execute runs t on a new goroutine, using the token channel to bound how
// many tasks run concurrently. wg tracks the goroutine so ShutDown can
// wait for in-flight work.
func (pool *NoChanPool) execute(t Task) {
	pool.wg.Add(1)
	go func() {
		// Block until a concurrency token is available.
		pool.acquire()
		defer func() {
			pool.release()
			pool.wg.Done()
		}()
		runTask(t)
	}()
}

// ShutDown marks the pool as shut down (first call only) and waits for
// all in-flight tasks to finish; subsequent calls are no-ops.
func (pool *NoChanPool) ShutDown() {
	if atomic.CompareAndSwapInt32(&pool.isShutDown, 0, 1) {
		pool.wg.Wait()
	}
}

// Execute runs the specified task on the pool; nil tasks are ignored.
func (pool *NoChanPool) Execute(t Task) {
	if t == nil {
		return
	}
	pool.execute(t)
}

// ExecuteFunc wraps f in a Task and executes it on the pool.
func (pool *NoChanPool) ExecuteFunc(f func() interface{}) {
	pool.Execute(&funcWrapper{f: f})
}

// Submit schedules t on the pool and returns a Future for its result.
func (pool *NoChanPool) Submit(t Task) (Future, error) {
	if t == nil {
		return nil, ErrTaskInvalid
	}

	future := &FutureResult{resultChan: make(chan interface{}, 1)}
	pool.execute(&taskWrapper{t: t, f: future})
	return future, nil
}

// SubmitFunc wraps f in a Task and submits it.
func (pool *NoChanPool) SubmitFunc(f func() interface{}) (Future, error) {
	return pool.Submit(&funcWrapper{f: f})
}

+ 108
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/progress.go View File

@@ -0,0 +1,108 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"io"
)

type ProgressEventType int

type ProgressEvent struct {
ConsumedBytes int64
TotalBytes int64
EventType ProgressEventType
}

const (
TransferStartedEvent ProgressEventType = 1 + iota
TransferDataEvent
TransferCompletedEvent
TransferFailedEvent
)

// newProgressEvent builds a ProgressEvent of the given type carrying the
// consumed/total byte counts.
func newProgressEvent(eventType ProgressEventType, consumed, total int64) *ProgressEvent {
	event := &ProgressEvent{EventType: eventType}
	event.ConsumedBytes = consumed
	event.TotalBytes = total
	return event
}

type ProgressListener interface {
ProgressChanged(event *ProgressEvent)
}

type readerTracker struct {
completedBytes int64
}

// publishProgress forwards event to listener; a nil listener or nil event
// is tolerated and results in a no-op.
func publishProgress(listener ProgressListener, event *ProgressEvent) {
	if listener == nil || event == nil {
		return
	}
	listener.ProgressChanged(event)
}

type teeReader struct {
reader io.Reader
consumedBytes int64
totalBytes int64
tracker *readerTracker
listener ProgressListener
}

// TeeReader wraps reader so every Read reports transfer progress to
// listener and, when tracker is non-nil, records the running byte count.
func TeeReader(reader io.Reader, totalBytes int64, listener ProgressListener, tracker *readerTracker) io.ReadCloser {
	tee := &teeReader{reader: reader, totalBytes: totalBytes}
	tee.listener = listener
	tee.tracker = tracker
	return tee
}

// Read reads from the underlying reader and publishes progress events: a
// TransferFailedEvent on any non-EOF error, and a TransferDataEvent for
// each chunk actually read. The running byte count is also copied into
// the tracker (if any).
func (t *teeReader) Read(p []byte) (n int, err error) {
	n, err = t.reader.Read(p)

	// Report failures, but not normal end-of-stream.
	if err != nil && err != io.EOF {
		event := newProgressEvent(TransferFailedEvent, t.consumedBytes, t.totalBytes)
		publishProgress(t.listener, event)
	}

	if n > 0 {
		t.consumedBytes += int64(n)

		if t.listener != nil {
			event := newProgressEvent(TransferDataEvent, t.consumedBytes, t.totalBytes)
			publishProgress(t.listener, event)
		}

		if t.tracker != nil {
			t.tracker.completedBytes = t.consumedBytes
		}
	}

	return
}

// Size returns the expected total number of bytes of the transfer.
// Receiver renamed from r to t for consistency with the other teeReader
// methods (Go convention: one receiver name per type).
func (t *teeReader) Size() int64 {
	return t.totalBytes
}

// Close closes the wrapped reader when it implements io.ReadCloser;
// otherwise it is a no-op.
func (t *teeReader) Close() error {
	rc, ok := t.reader.(io.ReadCloser)
	if !ok {
		return nil
	}
	return rc.Close()
}

+ 242
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/provider.go View File

@@ -0,0 +1,242 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"encoding/json"
"io/ioutil"
"math/rand"
"net"
"net/http"
"os"
"strings"
"sync"
"sync/atomic"
"time"
)

const (
accessKeyEnv = "OBS_ACCESS_KEY_ID"
securityKeyEnv = "OBS_SECRET_ACCESS_KEY"
securityTokenEnv = "OBS_SECURITY_TOKEN"
ecsRequestURL = "http://169.254.169.254/openstack/latest/securitykey"
)

type securityHolder struct {
ak string
sk string
securityToken string
}

var emptySecurityHolder = securityHolder{}

type securityProvider interface {
getSecurity() securityHolder
}

type BasicSecurityProvider struct {
val atomic.Value
}

// getSecurity returns the most recently stored credentials, or an empty
// holder when nothing has been stored yet.
func (bsp *BasicSecurityProvider) getSecurity() securityHolder {
	sh, ok := bsp.val.Load().(securityHolder)
	if !ok {
		return emptySecurityHolder
	}
	return sh
}

// refresh atomically replaces the stored credentials, trimming leading
// and trailing whitespace from each component.
func (bsp *BasicSecurityProvider) refresh(ak, sk, securityToken string) {
	sh := securityHolder{
		ak:            strings.TrimSpace(ak),
		sk:            strings.TrimSpace(sk),
		securityToken: strings.TrimSpace(securityToken),
	}
	bsp.val.Store(sh)
}

// NewBasicSecurityProvider creates a provider seeded with the given
// static credentials.
func NewBasicSecurityProvider(ak, sk, securityToken string) *BasicSecurityProvider {
	provider := new(BasicSecurityProvider)
	provider.refresh(ak, sk, securityToken)
	return provider
}

type EnvSecurityProvider struct {
sh securityHolder
suffix string
once sync.Once
}

// getSecurity reads the credentials from the OBS_* environment variables
// (with the configured suffix) exactly once and caches the result.
func (esp *EnvSecurityProvider) getSecurity() securityHolder {
	esp.once.Do(func() {
		readEnv := func(name string) string {
			return strings.TrimSpace(os.Getenv(name + esp.suffix))
		}
		esp.sh = securityHolder{
			ak:            readEnv(accessKeyEnv),
			sk:            readEnv(securityKeyEnv),
			securityToken: readEnv(securityTokenEnv),
		}
	})

	return esp.sh
}

// NewEnvSecurityProvider creates an EnvSecurityProvider; a non-empty
// suffix is appended to each environment variable name as "_<suffix>".
func NewEnvSecurityProvider(suffix string) *EnvSecurityProvider {
	provider := &EnvSecurityProvider{}
	if suffix != "" {
		provider.suffix = "_" + suffix
	}
	return provider
}

type TemporarySecurityHolder struct {
securityHolder
expireDate time.Time
}

var emptyTemporarySecurityHolder = TemporarySecurityHolder{}

type EcsSecurityProvider struct {
val atomic.Value
lock sync.Mutex
httpClient *http.Client
prefetch int32
retryCount int
}

// loadTemporarySecurityHolder returns the cached temporary credentials
// and whether a valid value was present.
func (ecsSp *EcsSecurityProvider) loadTemporarySecurityHolder() (TemporarySecurityHolder, bool) {
	stored := ecsSp.val.Load()
	if stored == nil {
		return emptyTemporarySecurityHolder, false
	}
	holder, ok := stored.(TemporarySecurityHolder)
	if !ok {
		return emptyTemporarySecurityHolder, false
	}
	return holder, true
}

// getAndSetSecurityWithOutLock fetches temporary credentials from the ECS
// metadata endpoint, retrying with randomized backoff up to retryCount
// times. The result is stored even on total failure (with the short
// 5-minute expiry set below) so a later caller retries. Returns the
// credential part of the stored holder.
//
// Fix: the HTTP response body is now closed after reading; previously it
// was never closed, leaking the connection on every request (including
// successful ones) and preventing keep-alive reuse. Also fixed the
// "ExprireDate" typo in the success log message.
func (ecsSp *EcsSecurityProvider) getAndSetSecurityWithOutLock() securityHolder {
	_sh := TemporarySecurityHolder{}
	_sh.expireDate = time.Now().Add(time.Minute * 5)
	retryCount := 0
	for {
		if req, err := http.NewRequest("GET", ecsRequestURL, nil); err == nil {
			start := GetCurrentTimestamp()
			res, err := ecsSp.httpClient.Do(req)
			if err == nil {
				data, _err := ioutil.ReadAll(res.Body)
				// Close the body so the keep-alive connection can be reused.
				_ = res.Body.Close()
				if _err == nil {
					temp := &struct {
						Credential struct {
							AK            string    `json:"access,omitempty"`
							SK            string    `json:"secret,omitempty"`
							SecurityToken string    `json:"securitytoken,omitempty"`
							ExpireDate    time.Time `json:"expires_at,omitempty"`
						} `json:"credential"`
					}{}

					doLog(LEVEL_DEBUG, "Get the json data from ecs succeed")

					if jsonErr := json.Unmarshal(data, temp); jsonErr == nil {
						_sh.ak = temp.Credential.AK
						_sh.sk = temp.Credential.SK
						_sh.securityToken = temp.Credential.SecurityToken
						// Renew one minute before the server-side expiry.
						_sh.expireDate = temp.Credential.ExpireDate.Add(time.Minute * -1)

						doLog(LEVEL_INFO, "Get security from ecs succeed, AK:xxxx, SK:xxxx, SecurityToken:xxxx, ExpireDate %s", _sh.expireDate)

						doLog(LEVEL_INFO, "Get security from ecs succeed, cost %d ms", (GetCurrentTimestamp() - start))
						break
					} else {
						err = jsonErr
					}
				} else {
					err = _err
				}
			}

			doLog(LEVEL_WARN, "Try to get security from ecs failed, cost %d ms, err %s", (GetCurrentTimestamp() - start), err.Error())
		}

		if retryCount >= ecsSp.retryCount {
			doLog(LEVEL_WARN, "Try to get security from ecs failed and exceed the max retry count")
			break
		}
		// Randomized backoff, capped at 10 seconds.
		sleepTime := float64(retryCount+2) * rand.Float64()
		if sleepTime > 10 {
			sleepTime = 10
		}
		time.Sleep(time.Duration(sleepTime * float64(time.Second)))
		retryCount++
	}

	ecsSp.val.Store(_sh)
	return _sh.securityHolder
}

// getAndSetSecurity returns the cached credentials when they are still
// valid; otherwise it refreshes them from the metadata service, holding
// the lock so only one goroutine refreshes at a time.
func (ecsSp *EcsSecurityProvider) getAndSetSecurity() securityHolder {
	ecsSp.lock.Lock()
	defer ecsSp.lock.Unlock()
	tsh, ok := ecsSp.loadTemporarySecurityHolder()
	if ok && !time.Now().After(tsh.expireDate) {
		return tsh.securityHolder
	}
	return ecsSp.getAndSetSecurityWithOutLock()
}

// getSecurity returns valid credentials, refreshing them from the ECS
// metadata service when they are missing or expired. When the cached
// credentials expire within 5 minutes, a single caller (elected by the
// prefetch CAS flag) refreshes them eagerly while other callers keep
// using the still-valid cached value.
func (ecsSp *EcsSecurityProvider) getSecurity() securityHolder {
	if tsh, succeed := ecsSp.loadTemporarySecurityHolder(); succeed {
		if time.Now().Before(tsh.expireDate) {
			// not expired yet
			if time.Now().Add(time.Minute*5).After(tsh.expireDate) && atomic.CompareAndSwapInt32(&ecsSp.prefetch, 0, 1) {
				// do prefetch: this caller refreshes; others fall through
				sh := ecsSp.getAndSetSecurityWithOutLock()
				atomic.CompareAndSwapInt32(&ecsSp.prefetch, 1, 0)
				return sh
			}
			return tsh.securityHolder
		}
		return ecsSp.getAndSetSecurity()
	}

	return ecsSp.getAndSetSecurity()
}

// getInternalTransport builds the http.Transport used for the ECS
// metadata-service client: 10-second dial and response-header timeouts, a
// small keep-alive pool, and compression disabled.
func getInternalTransport() *http.Transport {
	timeout := 10 // seconds
	transport := &http.Transport{
		Dial: func(network, addr string) (net.Conn, error) {
			start := GetCurrentTimestamp()
			conn, err := (&net.Dialer{
				Timeout:  time.Second * time.Duration(timeout),
				Resolver: net.DefaultResolver,
			}).Dial(network, addr)

			if isInfoLogEnabled() {
				doLog(LEVEL_INFO, "Do http dial cost %d ms", (GetCurrentTimestamp() - start))
			}
			if err != nil {
				return nil, err
			}
			// Wrap the connection with the SDK's timeout delegate
			// (arguments presumably read/write timeouts in seconds —
			// confirm against getConnDelegate).
			return getConnDelegate(conn, timeout, timeout*10), nil
		},
		MaxIdleConns:          10,
		MaxIdleConnsPerHost:   10,
		ResponseHeaderTimeout: time.Second * time.Duration(timeout),
		IdleConnTimeout:       time.Second * time.Duration(DEFAULT_IDLE_CONN_TIMEOUT),
		DisableCompression:    true,
	}

	return transport
}

// NewEcsSecurityProvider creates a provider that fetches temporary
// credentials from the ECS metadata service, retrying up to retryCount
// times per refresh.
func NewEcsSecurityProvider(retryCount int) *EcsSecurityProvider {
	provider := &EcsSecurityProvider{retryCount: retryCount}
	provider.httpClient = &http.Client{
		Transport:     getInternalTransport(),
		CheckRedirect: checkRedirectFunc,
	}
	return provider
}

+ 65
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/temporary_createSignedUrl.go View File

@@ -0,0 +1,65 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"errors"
"fmt"
)

// CreateSignedUrl creates a signed url with the specified
// CreateSignedUrlInput, and returns the CreateSignedUrlOutput and error.
// Expires defaults to 300 seconds when not positive. The output carries
// both the signed URL and the headers that must accompany the request.
func (obsClient ObsClient) CreateSignedUrl(input *CreateSignedUrlInput, extensions ...extensionOptions) (output *CreateSignedUrlOutput, err error) {
	if input == nil {
		return nil, errors.New("CreateSignedUrlInput is nil")
	}

	// Copy query parameters so the caller's map is never mutated.
	params := make(map[string]string, len(input.QueryParams))
	for key, value := range input.QueryParams {
		params[key] = value
	}

	// A sub-resource (e.g. ?acl) becomes a valueless query parameter.
	if input.SubResource != "" {
		params[string(input.SubResource)] = ""
	}

	headers := make(map[string][]string, len(input.Headers))
	for key, value := range input.Headers {
		headers[key] = []string{value}
	}

	// Apply extension options; header-injection failures are logged but
	// do not abort URL creation.
	for _, extension := range extensions {
		if extensionHeader, ok := extension.(extensionHeaders); ok {
			_err := extensionHeader(headers, obsClient.conf.signature == SignatureObs)
			if _err != nil {
				doLog(LEVEL_INFO, fmt.Sprintf("set header with error: %v", _err))
			}
		} else {
			doLog(LEVEL_INFO, "Unsupported extensionOptions")
		}
	}

	// Default validity period: 300 seconds.
	if input.Expires <= 0 {
		input.Expires = 300
	}

	requestURL, err := obsClient.doAuthTemporary(string(input.Method), input.Bucket, input.Key, input.Policy, params, headers, int64(input.Expires))
	if err != nil {
		return nil, err
	}

	output = &CreateSignedUrlOutput{
		SignedUrl:                  requestURL,
		ActualSignedRequestHeaders: headers,
	}
	return
}

+ 124
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/temporary_other.go View File

@@ -0,0 +1,124 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"errors"
"fmt"
"strings"
"time"
)

// isSecurityToken copies the security token (when present) into params,
// under the OBS or AMZ header name depending on the signature type.
func (obsClient ObsClient) isSecurityToken(params map[string]string, sh securityHolder) {
	if sh.securityToken == "" {
		return
	}
	key := HEADER_STS_TOKEN_AMZ
	if obsClient.conf.signature == SignatureObs {
		key = HEADER_STS_TOKEN_OBS
	}
	params[key] = sh.securityToken
}

// CreateBrowserBasedSignature gets the browser based signature with the
// specified CreateBrowserBasedSignatureInput, and returns the
// CreateBrowserBasedSignatureOutput and error. It builds a POST policy
// document (expiration + conditions), base64-encodes it, and signs it
// with either the V4 or the OBS/V2 scheme depending on configuration.
func (obsClient ObsClient) CreateBrowserBasedSignature(input *CreateBrowserBasedSignatureInput) (output *CreateBrowserBasedSignatureOutput, err error) {
	if input == nil {
		return nil, errors.New("CreateBrowserBasedSignatureInput is nil")
	}

	// Copy form parameters so the caller's map is never mutated.
	params := make(map[string]string, len(input.FormParams))
	for key, value := range input.FormParams {
		params[key] = value
	}

	date := time.Now().UTC()
	shortDate := date.Format(SHORT_DATE_FORMAT)
	longDate := date.Format(LONG_DATE_FORMAT)
	sh := obsClient.getSecurity()

	credential, _ := getCredential(sh.ak, obsClient.conf.region, shortDate)

	// Default validity period: 300 seconds.
	if input.Expires <= 0 {
		input.Expires = 300
	}

	expiration := date.Add(time.Second * time.Duration(input.Expires)).Format(ISO8601_DATE_FORMAT)
	if obsClient.conf.signature == SignatureV4 {
		params[PARAM_ALGORITHM_AMZ_CAMEL] = V4_HASH_PREFIX
		params[PARAM_CREDENTIAL_AMZ_CAMEL] = credential
		params[PARAM_DATE_AMZ_CAMEL] = longDate
	}

	obsClient.isSecurityToken(params, sh)

	// When bucket/key are not pinned, the policy instead allows any value
	// via a starts-with-"" condition; count tracks slice pre-sizing.
	matchAnyBucket := true
	matchAnyKey := true
	count := 5
	if bucket := strings.TrimSpace(input.Bucket); bucket != "" {
		params["bucket"] = bucket
		matchAnyBucket = false
		count--
	}

	if key := strings.TrimSpace(input.Key); key != "" {
		params["key"] = key
		matchAnyKey = false
		count--
	}

	// Assemble the JSON policy document piecewise; the trailing comma of
	// the last condition is trimmed before the closing "]}".
	originPolicySlice := make([]string, 0, len(params)+count)
	originPolicySlice = append(originPolicySlice, fmt.Sprintf("{\"expiration\":\"%s\",", expiration))
	originPolicySlice = append(originPolicySlice, "\"conditions\":[")
	for key, value := range params {
		if _key := strings.TrimSpace(strings.ToLower(key)); _key != "" {
			originPolicySlice = append(originPolicySlice, fmt.Sprintf("{\"%s\":\"%s\"},", _key, value))
		}
	}

	if matchAnyBucket {
		originPolicySlice = append(originPolicySlice, "[\"starts-with\", \"$bucket\", \"\"],")
	}

	if matchAnyKey {
		originPolicySlice = append(originPolicySlice, "[\"starts-with\", \"$key\", \"\"],")
	}

	for _, v := range input.RangeParams {
		originPolicySlice = append(originPolicySlice, fmt.Sprintf("[\"%s\", %d, %d],", v.RangeName, v.Lower, v.Upper))
	}

	lastIndex := len(originPolicySlice) - 1
	originPolicySlice[lastIndex] = strings.TrimSuffix(originPolicySlice[lastIndex], ",")

	originPolicySlice = append(originPolicySlice, "]}")

	originPolicy := strings.Join(originPolicySlice, "")

	policy := Base64Encode([]byte(originPolicy))
	var signature string
	if obsClient.conf.signature == SignatureV4 {
		signature = getSignature(policy, sh.sk, obsClient.conf.region, shortDate)
	} else {
		signature = Base64Encode(HmacSha1([]byte(sh.sk), []byte(policy)))
	}

	output = &CreateBrowserBasedSignatureOutput{
		OriginPolicy: originPolicy,
		Policy:       policy,
		Algorithm:    params[PARAM_ALGORITHM_AMZ_CAMEL],
		Credential:   params[PARAM_CREDENTIAL_AMZ_CAMEL],
		Date:         params[PARAM_DATE_AMZ_CAMEL],
		Signature:    signature,
	}
	return
}

+ 758
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/temporary_signedUrl.go View File

@@ -0,0 +1,758 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"errors"
"io"
"net/http"
"os"
"strings"
)

// ListBucketsWithSignedUrl lists buckets with the specified signed url
// and signed request headers.
func (obsClient ObsClient) ListBucketsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListBucketsOutput, err error) {
	output = &ListBucketsOutput{}
	if err = obsClient.doHTTPWithSignedURL("ListBuckets", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		output = nil
	}
	return
}

// CreateBucketWithSignedUrl creates a bucket with the specified signed
// url, signed request headers and request body.
func (obsClient ObsClient) CreateBucketWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("CreateBucket", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true); err != nil {
		output = nil
	}
	return
}

// DeleteBucketWithSignedUrl deletes a bucket with the specified signed
// url and signed request headers.
func (obsClient ObsClient) DeleteBucketWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("DeleteBucket", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		output = nil
	}
	return
}

// SetBucketStoragePolicyWithSignedUrl sets the bucket storage class with
// the specified signed url, signed request headers and request body.
func (obsClient ObsClient) SetBucketStoragePolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("SetBucketStoragePolicy", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true); err != nil {
		output = nil
	}
	return
}

// GetBucketStoragePolicyWithSignedUrl gets the bucket storage class with
// the specified signed url and signed request headers.
func (obsClient ObsClient) GetBucketStoragePolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketStoragePolicyOutput, err error) {
	output = &GetBucketStoragePolicyOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetBucketStoragePolicy", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		output = nil
	}
	return
}

// ListObjectsWithSignedUrl lists objects in a bucket with the specified
// signed url and signed request headers. On success the bucket-region
// response header (when present) is copied into Location, and
// URL-encoded listings are decoded in place.
func (obsClient ObsClient) ListObjectsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListObjectsOutput, err error) {
	output = &ListObjectsOutput{}
	err = obsClient.doHTTPWithSignedURL("ListObjects", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
	if err != nil {
		output = nil
	} else {
		if location, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok {
			output.Location = location[0]
		}
		if output.EncodingType == "url" {
			err = decodeListObjectsOutput(output)
			if err != nil {
				doLog(LEVEL_ERROR, "Failed to get ListObjectsOutput with error: %v.", err)
				output = nil
			}
		}
	}
	return
}

// ListVersionsWithSignedUrl lists versioning objects in a bucket with the
// specified signed url and signed request headers. On success the
// bucket-region response header (when present) is copied into Location,
// and URL-encoded listings are decoded in place.
func (obsClient ObsClient) ListVersionsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListVersionsOutput, err error) {
	output = &ListVersionsOutput{}
	err = obsClient.doHTTPWithSignedURL("ListVersions", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
	if err != nil {
		output = nil
	} else {
		if location, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok {
			output.Location = location[0]
		}
		if output.EncodingType == "url" {
			err = decodeListVersionsOutput(output)
			if err != nil {
				doLog(LEVEL_ERROR, "Failed to get ListVersionsOutput with error: %v.", err)
				output = nil
			}
		}
	}
	return
}

// ListMultipartUploadsWithSignedUrl lists the multipart uploads that are
// initialized but not combined or aborted in a specified bucket, with the
// specified signed url and signed request headers. URL-encoded listings
// are decoded in place on success.
func (obsClient ObsClient) ListMultipartUploadsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListMultipartUploadsOutput, err error) {
	output = &ListMultipartUploadsOutput{}
	err = obsClient.doHTTPWithSignedURL("ListMultipartUploads", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
	if err != nil {
		output = nil
	} else if output.EncodingType == "url" {
		err = decodeListMultipartUploadsOutput(output)
		if err != nil {
			doLog(LEVEL_ERROR, "Failed to get ListMultipartUploadsOutput with error: %v.", err)
			output = nil
		}
	}
	return
}

// SetBucketQuotaWithSignedUrl sets the bucket quota with the specified
// signed url, signed request headers and request body.
func (obsClient ObsClient) SetBucketQuotaWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("SetBucketQuota", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true); err != nil {
		output = nil
	}
	return
}

// GetBucketQuotaWithSignedUrl gets the bucket quota with the specified
// signed url and signed request headers.
func (obsClient ObsClient) GetBucketQuotaWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketQuotaOutput, err error) {
	output = &GetBucketQuotaOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetBucketQuota", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		output = nil
	}
	return
}

// HeadBucketWithSignedUrl checks whether a bucket exists via a presigned URL and the
// headers that were signed with it.
func (obsClient ObsClient) HeadBucketWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("HeadBucket", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// HeadObjectWithSignedUrl checks whether an object exists via a presigned URL and the
// headers that were signed with it.
func (obsClient ObsClient) HeadObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("HeadObject", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// GetBucketMetadataWithSignedUrl retrieves bucket metadata via a presigned URL and the
// headers that were signed with it, then parses the response headers into the output.
func (obsClient ObsClient) GetBucketMetadataWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketMetadataOutput, err error) {
	output = &GetBucketMetadataOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetBucketMetadata", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	ParseGetBucketMetadataOutput(output)
	return output, nil
}

// GetBucketStorageInfoWithSignedUrl retrieves storage information about a bucket via a
// presigned URL and the headers that were signed with it.
func (obsClient ObsClient) GetBucketStorageInfoWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketStorageInfoOutput, err error) {
	output = &GetBucketStorageInfoOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetBucketStorageInfo", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// GetBucketLocationWithSignedUrl retrieves the location of a bucket via a presigned URL
// and the headers that were signed with it.
func (obsClient ObsClient) GetBucketLocationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketLocationOutput, err error) {
	output = &GetBucketLocationOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetBucketLocation", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// SetBucketAclWithSignedUrl sets the bucket ACL via a presigned URL, the signed request
// headers, and the request body in data.
func (obsClient ObsClient) SetBucketAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("SetBucketAcl", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// GetBucketAclWithSignedUrl retrieves the bucket ACL via a presigned URL and the headers
// that were signed with it.
func (obsClient ObsClient) GetBucketAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketAclOutput, err error) {
	output = &GetBucketAclOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetBucketAcl", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// SetBucketPolicyWithSignedUrl sets the bucket policy via a presigned URL, the signed
// request headers, and the request body in data.
func (obsClient ObsClient) SetBucketPolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("SetBucketPolicy", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// GetBucketPolicyWithSignedUrl retrieves the bucket policy via a presigned URL and the
// headers that were signed with it. Note the final argument is false: the policy body
// is plain JSON, not XML.
func (obsClient ObsClient) GetBucketPolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketPolicyOutput, err error) {
	output = &GetBucketPolicyOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetBucketPolicy", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, false); err != nil {
		return nil, err
	}
	return output, nil
}

// DeleteBucketPolicyWithSignedUrl deletes the bucket policy via a presigned URL and the
// headers that were signed with it.
func (obsClient ObsClient) DeleteBucketPolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("DeleteBucketPolicy", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// SetBucketCorsWithSignedUrl sets CORS rules for a bucket via a presigned URL, the signed
// request headers, and the request body in data.
func (obsClient ObsClient) SetBucketCorsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("SetBucketCors", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// GetBucketCorsWithSignedUrl retrieves the CORS rules of a bucket via a presigned URL and
// the headers that were signed with it.
func (obsClient ObsClient) GetBucketCorsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketCorsOutput, err error) {
	output = &GetBucketCorsOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetBucketCors", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// DeleteBucketCorsWithSignedUrl deletes the CORS rules of a bucket via a presigned URL and
// the headers that were signed with it.
func (obsClient ObsClient) DeleteBucketCorsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("DeleteBucketCors", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// SetBucketVersioningWithSignedUrl sets the versioning status of a bucket via a presigned
// URL, the signed request headers, and the request body in data.
func (obsClient ObsClient) SetBucketVersioningWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("SetBucketVersioning", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// GetBucketVersioningWithSignedUrl retrieves the versioning status of a bucket via a
// presigned URL and the headers that were signed with it.
func (obsClient ObsClient) GetBucketVersioningWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketVersioningOutput, err error) {
	output = &GetBucketVersioningOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetBucketVersioning", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// SetBucketWebsiteConfigurationWithSignedUrl configures website hosting for a bucket via a
// presigned URL, the signed request headers, and the request body in data.
func (obsClient ObsClient) SetBucketWebsiteConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("SetBucketWebsiteConfiguration", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// GetBucketWebsiteConfigurationWithSignedUrl retrieves the website hosting settings of a
// bucket via a presigned URL and the headers that were signed with it.
func (obsClient ObsClient) GetBucketWebsiteConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketWebsiteConfigurationOutput, err error) {
	output = &GetBucketWebsiteConfigurationOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetBucketWebsiteConfiguration", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// DeleteBucketWebsiteConfigurationWithSignedUrl removes the website hosting settings of a
// bucket via a presigned URL and the headers that were signed with it.
func (obsClient ObsClient) DeleteBucketWebsiteConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("DeleteBucketWebsiteConfiguration", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// SetBucketLoggingConfigurationWithSignedUrl sets bucket logging via a presigned URL, the
// signed request headers, and the request body in data.
func (obsClient ObsClient) SetBucketLoggingConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("SetBucketLoggingConfiguration", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// GetBucketLoggingConfigurationWithSignedUrl retrieves the logging settings of a bucket via
// a presigned URL and the headers that were signed with it.
func (obsClient ObsClient) GetBucketLoggingConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketLoggingConfigurationOutput, err error) {
	output = &GetBucketLoggingConfigurationOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetBucketLoggingConfiguration", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// SetBucketLifecycleConfigurationWithSignedUrl sets lifecycle rules for a bucket via a
// presigned URL, the signed request headers, and the request body in data.
func (obsClient ObsClient) SetBucketLifecycleConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("SetBucketLifecycleConfiguration", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// GetBucketLifecycleConfigurationWithSignedUrl retrieves the lifecycle rules of a bucket via
// a presigned URL and the headers that were signed with it.
func (obsClient ObsClient) GetBucketLifecycleConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketLifecycleConfigurationOutput, err error) {
	output = &GetBucketLifecycleConfigurationOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetBucketLifecycleConfiguration", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// DeleteBucketLifecycleConfigurationWithSignedUrl removes the lifecycle rules of a bucket
// via a presigned URL and the headers that were signed with it.
func (obsClient ObsClient) DeleteBucketLifecycleConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("DeleteBucketLifecycleConfiguration", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// SetBucketTaggingWithSignedUrl sets bucket tags via a presigned URL, the signed request
// headers, and the request body in data.
func (obsClient ObsClient) SetBucketTaggingWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("SetBucketTagging", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// GetBucketTaggingWithSignedUrl retrieves bucket tags via a presigned URL and the headers
// that were signed with it.
func (obsClient ObsClient) GetBucketTaggingWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketTaggingOutput, err error) {
	output = &GetBucketTaggingOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetBucketTagging", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// DeleteBucketTaggingWithSignedUrl deletes bucket tags via a presigned URL and the headers
// that were signed with it.
func (obsClient ObsClient) DeleteBucketTaggingWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("DeleteBucketTagging", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// SetBucketNotificationWithSignedUrl configures event notification for a bucket via a
// presigned URL, the signed request headers, and the request body in data.
func (obsClient ObsClient) SetBucketNotificationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("SetBucketNotification", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// GetBucketNotificationWithSignedUrl retrieves the event notification settings of a bucket
// via a presigned URL and the headers that were signed with it.
func (obsClient ObsClient) GetBucketNotificationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketNotificationOutput, err error) {
	output = &GetBucketNotificationOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetBucketNotification", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// DeleteObjectWithSignedUrl deletes an object via a presigned URL and the headers that were
// signed with it, then parses delete-marker/version info from the response headers.
func (obsClient ObsClient) DeleteObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *DeleteObjectOutput, err error) {
	output = &DeleteObjectOutput{}
	if err = obsClient.doHTTPWithSignedURL("DeleteObject", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	ParseDeleteObjectOutput(output)
	return output, nil
}

// DeleteObjectsWithSignedUrl batch-deletes objects via a presigned URL, the signed request
// headers, and the request body in data; URL-encoded responses are decoded before returning.
func (obsClient ObsClient) DeleteObjectsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *DeleteObjectsOutput, err error) {
	output = &DeleteObjectsOutput{}
	if err = obsClient.doHTTPWithSignedURL("DeleteObjects", HTTP_POST, signedUrl, actualSignedRequestHeaders, data, output, true); err != nil {
		return nil, err
	}
	if output.EncodingType == "url" {
		if err = decodeDeleteObjectsOutput(output); err != nil {
			doLog(LEVEL_ERROR, "Failed to get DeleteObjectsOutput with error: %v.", err)
			return nil, err
		}
	}
	return output, nil
}

// SetObjectAclWithSignedUrl sets an object's ACL via a presigned URL, the signed request
// headers, and the request body in data.
func (obsClient ObsClient) SetObjectAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("SetObjectAcl", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// GetObjectAclWithSignedUrl gets the ACL of an object with the specified signed url and signed request headers.
// On failure a nil output and a non-nil err are returned.
func (obsClient ObsClient) GetObjectAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetObjectAclOutput, err error) {
	output = &GetObjectAclOutput{}
	err = obsClient.doHTTPWithSignedURL("GetObjectAcl", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
	if err != nil {
		return nil, err
	}
	// Guard the slice index: a present-but-empty version-id header value previously
	// risked an index-out-of-range panic on versionID[0].
	if versionID, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok && len(versionID) > 0 {
		output.VersionId = versionID[0]
	}
	return output, nil
}

// RestoreObjectWithSignedUrl restores an archived object via a presigned URL, the signed
// request headers, and the request body in data.
func (obsClient ObsClient) RestoreObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("RestoreObject", HTTP_POST, signedUrl, actualSignedRequestHeaders, data, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// GetObjectMetadataWithSignedUrl retrieves object metadata via a presigned URL and the
// headers that were signed with it, then parses the response headers into the output.
func (obsClient ObsClient) GetObjectMetadataWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetObjectMetadataOutput, err error) {
	output = &GetObjectMetadataOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetObjectMetadata", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	ParseGetObjectMetadataOutput(output)
	return output, nil
}

// GetObjectWithSignedUrl downloads an object via a presigned URL and the headers that were
// signed with it, then parses the response headers into the output.
func (obsClient ObsClient) GetObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetObjectOutput, err error) {
	output = &GetObjectOutput{}
	if err = obsClient.doHTTPWithSignedURL(GET_OBJECT, HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	ParseGetObjectOutput(output)
	return output, nil
}

// PutObjectWithSignedUrl uploads an object via a presigned URL, the signed request headers,
// and the request body in data, then parses the response headers into the output.
func (obsClient ObsClient) PutObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *PutObjectOutput, err error) {
	output = &PutObjectOutput{}
	if err = obsClient.doHTTPWithSignedURL(PUT_OBJECT, HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true); err != nil {
		return nil, err
	}
	ParsePutObjectOutput(output)
	return output, nil
}

// PutFileWithSignedUrl uploads a file to the specified bucket with the specified signed url and signed request headers and sourceFile path.
// An empty (or all-whitespace) sourceFile sends a request with no body. The Content-Length
// signed header, when present, caps how many bytes of the file are sent; it must not exceed
// the file size. On failure a nil output and a non-nil err are returned.
func (obsClient ObsClient) PutFileWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, sourceFile string) (output *PutObjectOutput, err error) {
	var data io.Reader
	sourceFile = strings.TrimSpace(sourceFile)
	if sourceFile != "" {
		fd, openErr := os.Open(sourceFile)
		if openErr != nil {
			return nil, openErr
		}
		// The file must stay open until doHTTPWithSignedURL has streamed it below;
		// the deferred close runs at function exit.
		defer func() {
			if closeErr := fd.Close(); closeErr != nil {
				doLog(LEVEL_WARN, "Failed to close file with reason: %v", closeErr)
			}
		}()

		stat, statErr := fd.Stat()
		if statErr != nil {
			return nil, statErr
		}
		// Renamed from "fileReaderWrapper", which shadowed its own type name.
		wrapper := &fileReaderWrapper{filePath: sourceFile}
		wrapper.reader = fd

		// Prefer an explicitly signed Content-Length (either header casing);
		// otherwise send the whole file.
		var contentLength int64
		if value, ok := actualSignedRequestHeaders[HEADER_CONTENT_LENGTH_CAMEL]; ok {
			contentLength = StringToInt64(value[0], -1)
		} else if value, ok := actualSignedRequestHeaders[HEADER_CONTENT_LENGTH]; ok {
			contentLength = StringToInt64(value[0], -1)
		} else {
			contentLength = stat.Size()
		}
		if contentLength > stat.Size() {
			return nil, errors.New("ContentLength is larger than fileSize")
		}
		wrapper.totalCount = contentLength
		data = wrapper
	}

	output = &PutObjectOutput{}
	err = obsClient.doHTTPWithSignedURL(PUT_FILE, HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
	if err != nil {
		return nil, err
	}
	ParsePutObjectOutput(output)
	return output, nil
}

// CopyObjectWithSignedUrl creates a copy of an existing object via a presigned URL and the
// headers that were signed with it, then parses the response into the output.
func (obsClient ObsClient) CopyObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *CopyObjectOutput, err error) {
	output = &CopyObjectOutput{}
	if err = obsClient.doHTTPWithSignedURL("CopyObject", HTTP_PUT, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	ParseCopyObjectOutput(output)
	return output, nil
}

// AbortMultipartUploadWithSignedUrl aborts a multipart upload identified by the upload ID
// embedded in the presigned URL, using the headers that were signed with it.
func (obsClient ObsClient) AbortMultipartUploadWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("AbortMultipartUpload", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// InitiateMultipartUploadWithSignedUrl starts a multipart upload via a presigned URL and the
// headers that were signed with it; URL-encoded responses are decoded before returning.
func (obsClient ObsClient) InitiateMultipartUploadWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *InitiateMultipartUploadOutput, err error) {
	output = &InitiateMultipartUploadOutput{}
	if err = obsClient.doHTTPWithSignedURL("InitiateMultipartUpload", HTTP_POST, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	ParseInitiateMultipartUploadOutput(output)
	if output.EncodingType == "url" {
		if err = decodeInitiateMultipartUploadOutput(output); err != nil {
			doLog(LEVEL_ERROR, "Failed to get InitiateMultipartUploadOutput with error: %v.", err)
			return nil, err
		}
	}
	return output, nil
}

// UploadPartWithSignedUrl uploads one part of a multipart upload via a presigned URL, the
// signed request headers, and the part body in data, then parses the response headers.
func (obsClient ObsClient) UploadPartWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *UploadPartOutput, err error) {
	output = &UploadPartOutput{}
	if err = obsClient.doHTTPWithSignedURL("UploadPart", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true); err != nil {
		return nil, err
	}
	ParseUploadPartOutput(output)
	return output, nil
}

// CompleteMultipartUploadWithSignedUrl combines the previously uploaded parts via a presigned
// URL, the signed request headers, and the part list in data; URL-encoded responses are
// decoded before returning.
func (obsClient ObsClient) CompleteMultipartUploadWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *CompleteMultipartUploadOutput, err error) {
	output = &CompleteMultipartUploadOutput{}
	if err = obsClient.doHTTPWithSignedURL("CompleteMultipartUpload", HTTP_POST, signedUrl, actualSignedRequestHeaders, data, output, true); err != nil {
		return nil, err
	}
	ParseCompleteMultipartUploadOutput(output)
	if output.EncodingType == "url" {
		if err = decodeCompleteMultipartUploadOutput(output); err != nil {
			doLog(LEVEL_ERROR, "Failed to get CompleteMultipartUploadOutput with error: %v.", err)
			return nil, err
		}
	}
	return output, nil
}

// ListPartsWithSignedUrl lists the uploaded parts of a multipart upload via a presigned URL
// and the headers that were signed with it; URL-encoded responses are decoded before returning.
func (obsClient ObsClient) ListPartsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListPartsOutput, err error) {
	output = &ListPartsOutput{}
	if err = obsClient.doHTTPWithSignedURL("ListParts", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	if output.EncodingType == "url" {
		if err = decodeListPartsOutput(output); err != nil {
			doLog(LEVEL_ERROR, "Failed to get ListPartsOutput with error: %v.", err)
			return nil, err
		}
	}
	return output, nil
}

// CopyPartWithSignedUrl copies data into a part of a multipart upload via a presigned URL
// and the headers that were signed with it, then parses the response into the output.
func (obsClient ObsClient) CopyPartWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *CopyPartOutput, err error) {
	output = &CopyPartOutput{}
	if err = obsClient.doHTTPWithSignedURL("CopyPart", HTTP_PUT, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	ParseCopyPartOutput(output)
	return output, nil
}

// SetBucketRequestPaymentWithSignedUrl sets the requester-pays configuration for a bucket
// via a presigned URL, the signed request headers, and the request body in data.
func (obsClient ObsClient) SetBucketRequestPaymentWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("SetBucketRequestPayment", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// GetBucketRequestPaymentWithSignedUrl retrieves the requester-pays configuration of a
// bucket via a presigned URL and the headers that were signed with it.
func (obsClient ObsClient) GetBucketRequestPaymentWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketRequestPaymentOutput, err error) {
	output = &GetBucketRequestPaymentOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetBucketRequestPayment", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// SetBucketEncryptionWithSignedURL sets the bucket encryption configuration via a presigned
// URL, the signed request headers, and the request body in data.
func (obsClient ObsClient) SetBucketEncryptionWithSignedURL(signedURL string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("SetBucketEncryption", HTTP_PUT, signedURL, actualSignedRequestHeaders, data, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// GetBucketEncryptionWithSignedURL retrieves the bucket encryption configuration via a
// presigned URL and the headers that were signed with it.
func (obsClient ObsClient) GetBucketEncryptionWithSignedURL(signedURL string, actualSignedRequestHeaders http.Header) (output *GetBucketEncryptionOutput, err error) {
	output = &GetBucketEncryptionOutput{}
	if err = obsClient.doHTTPWithSignedURL("GetBucketEncryption", HTTP_GET, signedURL, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// DeleteBucketEncryptionWithSignedURL removes the bucket encryption configuration via a
// presigned URL and the headers that were signed with it.
func (obsClient ObsClient) DeleteBucketEncryptionWithSignedURL(signedURL string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
	output = &BaseModel{}
	if err = obsClient.doHTTPWithSignedURL("DeleteBucketEncryption", HTTP_DELETE, signedURL, actualSignedRequestHeaders, nil, output, true); err != nil {
		return nil, err
	}
	return output, nil
}

// AppendObjectWithSignedURL uploads an object to the specified bucket with the specified signed url and signed request headers and data.
// (Comment previously said "AppendObjectWithSignedUrl"; Go doc comments must start with the declared name.)
func (obsClient ObsClient) AppendObjectWithSignedURL(signedURL string, actualSignedRequestHeaders http.Header, data io.Reader) (output *AppendObjectOutput, err error) {
	output = &AppendObjectOutput{}
	err = obsClient.doHTTPWithSignedURL(APPEND_OBJECT, HTTP_POST, signedURL, actualSignedRequestHeaders, data, output, true)
	if err != nil {
		output = nil
	} else {
		// Unlike most siblings, parsing the response can itself fail here; a parse
		// failure also yields a nil output with err set.
		if err = ParseAppendObjectOutput(output); err != nil {
			output = nil
		}
	}
	return
}

// ModifyObjectWithSignedURL uploads an object to the specified bucket with the specified signed url and signed request headers and data.
// (Comment previously said "ModifyObjectWithSignedUrl"; Go doc comments must start with the declared name.)
func (obsClient ObsClient) ModifyObjectWithSignedURL(signedURL string, actualSignedRequestHeaders http.Header, data io.Reader) (output *ModifyObjectOutput, err error) {
	output = &ModifyObjectOutput{}
	err = obsClient.doHTTPWithSignedURL("ModifyObject", HTTP_PUT, signedURL, actualSignedRequestHeaders, data, output, true)
	if err != nil {
		// A failed request yields a nil output; callers must check err first.
		output = nil
	} else {
		ParseModifyObjectOutput(output)
	}
	return
}

+ 154
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/trait_base.go View File

@@ -0,0 +1,154 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"io"
)

// IRepeatable defines interface with function: Reset
type IRepeatable interface {
	Reset() error
}

// IReadCloser defines interface with function: setReadCloser, which attaches a
// response body to the implementing output type.
type IReadCloser interface {
	setReadCloser(body io.ReadCloser)
}

func setHeaders(headers map[string][]string, header string, headerValue []string, isObs bool) {
if isObs {
header = HEADER_PREFIX_OBS + header
headers[header] = headerValue
} else {
header = HEADER_PREFIX + header
headers[header] = headerValue
}
}

// setHeadersNext stores headerValue under one of two fully-specified header
// names: header when isObs is true, headerNext otherwise.
func setHeadersNext(headers map[string][]string, header string, headerNext string, headerValue []string, isObs bool) {
	key := headerNext
	if isObs {
		key = header
	}
	headers[key] = headerValue
}

// IBaseModel defines interface for base response model: setters used to record
// the HTTP status code, request id, and response headers on every output type.
type IBaseModel interface {
	setStatusCode(statusCode int)

	setRequestID(requestID string)

	setResponseHeaders(responseHeaders map[string][]string)
}

// ISerializable defines interface with function: trans, which converts a request
// input into query params, headers, and body data.
type ISerializable interface {
	trans(isObs bool) (map[string]string, map[string][]string, interface{}, error)
}

// DefaultSerializable defines default serializable struct
type DefaultSerializable struct {
	params  map[string]string   // query parameters
	headers map[string][]string // request headers
	data    interface{}         // request body
}

// trans returns the pre-built params, headers, and data unchanged; isObs is ignored.
func (input DefaultSerializable) trans(isObs bool) (map[string]string, map[string][]string, interface{}, error) {
	return input.params, input.headers, input.data, nil
}

// defaultSerializable is a shared, empty DefaultSerializable (no params, headers, or data).
var defaultSerializable = &DefaultSerializable{}

// newSubResourceSerialV2 builds a serializable whose only content is the given
// sub-resource query parameter with an explicit value.
func newSubResourceSerialV2(subResource SubResourceType, value string) *DefaultSerializable {
	return &DefaultSerializable{
		params: map[string]string{string(subResource): value},
	}
}

// newSubResourceSerial builds a serializable whose only content is the given
// sub-resource query parameter with an empty value.
func newSubResourceSerial(subResource SubResourceType) *DefaultSerializable {
	return &DefaultSerializable{
		params: map[string]string{string(subResource): ""},
	}
}

// trans builds the sub-resource query parameter and converts input into a body
// reader; headers are always nil.
func trans(subResource SubResourceType, input interface{}) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	data, err = ConvertRequestToIoReader(input)
	params = map[string]string{string(subResource): ""}
	return params, headers, data, err
}

// setStatusCode records the HTTP status code on the base response model.
func (baseModel *BaseModel) setStatusCode(statusCode int) {
	baseModel.StatusCode = statusCode
}

// setRequestID records the server-assigned request id on the base response model.
func (baseModel *BaseModel) setRequestID(requestID string) {
	baseModel.RequestId = requestID
}

// setResponseHeaders records the raw response headers on the base response model.
func (baseModel *BaseModel) setResponseHeaders(responseHeaders map[string][]string) {
	baseModel.ResponseHeaders = responseHeaders
}

// GetEncryption returns the configured Encryption value, or the protocol-appropriate
// SSE-KMS default (OBS vs. non-OBS) when none was set.
func (header SseKmsHeader) GetEncryption() string {
	if header.Encryption != "" {
		return header.Encryption
	}
	if header.isObs {
		return DEFAULT_SSE_KMS_ENCRYPTION_OBS
	}
	return DEFAULT_SSE_KMS_ENCRYPTION
}

// GetKey gets the Key field value from SseKmsHeader
func (header SseKmsHeader) GetKey() string {
	return header.Key
}

// GetEncryption returns the configured Encryption value, falling back to the
// SSE-C default when none was set.
func (header SseCHeader) GetEncryption() string {
	if header.Encryption == "" {
		return DEFAULT_SSE_C_ENCRYPTION
	}
	return header.Encryption
}

// GetKey gets the Key field value from SseCHeader
func (header SseCHeader) GetKey() string {
	return header.Key
}

// GetKeyMD5 returns the configured KeyMD5, or computes it by base64-decoding the
// key and hashing the raw bytes. An undecodable key yields "" rather than an error.
func (header SseCHeader) GetKeyMD5() string {
	if header.KeyMD5 != "" {
		return header.KeyMD5
	}
	decoded, decodeErr := Base64Decode(header.GetKey())
	if decodeErr != nil {
		return ""
	}
	return Base64Md5(decoded)
}

// setSseHeader writes server-side-encryption request headers. SSE-C headers
// are always honored; SSE-KMS headers are skipped when sseCOnly is true.
func setSseHeader(headers map[string][]string, sseHeader ISseHeader, sseCOnly bool, isObs bool) {
	if sseHeader == nil {
		return
	}
	switch h := sseHeader.(type) {
	case SseCHeader:
		setHeaders(headers, HEADER_SSEC_ENCRYPTION, []string{h.GetEncryption()}, isObs)
		setHeaders(headers, HEADER_SSEC_KEY, []string{h.GetKey()}, isObs)
		setHeaders(headers, HEADER_SSEC_KEY_MD5, []string{h.GetKeyMD5()}, isObs)
	case SseKmsHeader:
		if sseCOnly {
			return
		}
		// Propagate the signature flavor so GetEncryption picks the right default.
		h.isObs = isObs
		setHeaders(headers, HEADER_SSEKMS_ENCRYPTION, []string{h.GetEncryption()}, isObs)
		if key := h.GetKey(); key != "" {
			setHeadersNext(headers, HEADER_SSEKMS_KEY_OBS, HEADER_SSEKMS_KEY_AMZ, []string{key}, isObs)
		}
	}
}

+ 352
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/trait_bucket.go View File

@@ -0,0 +1,352 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"fmt"
"strings"
)

// trans serializes a ListBucketsInput into request params and headers.
func (input ListBucketsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = make(map[string]string)
	headers = make(map[string][]string)
	if input.MaxKeys > 0 {
		params["max-keys"] = IntToString(input.MaxKeys)
	}
	if marker := input.Marker; marker != "" {
		params["marker"] = marker
	}
	// The location query header only exists for the non-OBS signature.
	if !isObs && input.QueryLocation {
		setHeaders(headers, HEADER_LOCATION_AMZ, []string{"true"}, isObs)
	}
	if bucketType := string(input.BucketType); bucketType != "" {
		setHeaders(headers, HEADER_BUCKET_TYPE, []string{bucketType}, true)
	}
	return
}

// prepareGrantHeaders writes the per-grantee ACL headers for bucket creation.
// The two "delivered" grants are always emitted in OBS form.
func (input CreateBucketInput) prepareGrantHeaders(headers map[string][]string, isObs bool) {
	grants := []struct {
		id     string
		header string
		obs    bool
	}{
		{input.GrantReadId, HEADER_GRANT_READ_OBS, isObs},
		{input.GrantWriteId, HEADER_GRANT_WRITE_OBS, isObs},
		{input.GrantReadAcpId, HEADER_GRANT_READ_ACP_OBS, isObs},
		{input.GrantWriteAcpId, HEADER_GRANT_WRITE_ACP_OBS, isObs},
		{input.GrantFullControlId, HEADER_GRANT_FULL_CONTROL_OBS, isObs},
		{input.GrantReadDeliveredId, HEADER_GRANT_READ_DELIVERED_OBS, true},
		{input.GrantFullControlDeliveredId, HEADER_GRANT_FULL_CONTROL_DELIVERED_OBS, true},
	}
	for _, g := range grants {
		if g.id != "" {
			setHeaders(headers, g.header, []string{g.id}, g.obs)
		}
	}
}

// trans serializes a CreateBucketInput into request headers and an optional
// XML body carrying the bucket location.
func (input CreateBucketInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	headers = make(map[string][]string)
	if acl := string(input.ACL); acl != "" {
		setHeaders(headers, HEADER_ACL, []string{acl}, isObs)
	}
	if storageClass := string(input.StorageClass); storageClass != "" {
		// The non-OBS signature uses different storage-class names; map the
		// OBS names onto them. Intelligent tiering has no non-OBS equivalent.
		if !isObs {
			if storageClass == string(StorageClassWarm) {
				storageClass = string(storageClassStandardIA)
			} else if storageClass == string(StorageClassCold) {
				storageClass = string(storageClassGlacier)
			} else if storageClass == string(StorageClassIntelligentTiering) {
				doLog(LEVEL_WARN, "Intelligent tiering supports only OBS signature.")
			}
		}
		setHeadersNext(headers, HEADER_STORAGE_CLASS_OBS, HEADER_STORAGE_CLASS, []string{storageClass}, isObs)
	}
	if epid := input.Epid; epid != "" {
		setHeaders(headers, HEADER_EPID_HEADERS, []string{epid}, isObs)
	}
	if availableZone := input.AvailableZone; availableZone != "" {
		setHeaders(headers, HEADER_AZ_REDUNDANCY, []string{availableZone}, isObs)
	}

	input.prepareGrantHeaders(headers, isObs)
	// The file-interface switch is always sent as an OBS-style header.
	if input.IsFSFileInterface {
		setHeaders(headers, headerFSFileInterface, []string{"Enabled"}, true)
	}

	// A non-empty location produces the CreateBucketConfiguration XML body;
	// OBS and non-OBS signatures use different element names for it.
	if location := strings.TrimSpace(input.Location); location != "" {
		input.Location = location

		xml := make([]string, 0, 3)
		xml = append(xml, "<CreateBucketConfiguration>")
		if isObs {
			xml = append(xml, fmt.Sprintf("<Location>%s</Location>", input.Location))
		} else {
			xml = append(xml, fmt.Sprintf("<LocationConstraint>%s</LocationConstraint>", input.Location))
		}
		xml = append(xml, "</CreateBucketConfiguration>")

		data = strings.Join(xml, "")
	}

	if bucketRedundancy := string(input.BucketRedundancy); bucketRedundancy != "" {
		setHeaders(headers, HEADER_BUCKET_REDUNDANCY, []string{bucketRedundancy}, isObs)
	}
	if input.IsFusionAllowUpgrade {
		setHeaders(headers, HEADER_FUSION_ALLOW_UPGRADE, []string{"true"}, isObs)
	}

	if input.IsRedundancyAllowALT {
		setHeaders(headers, HEADER_FUSION_ALLOW_ALT, []string{"true"}, isObs)
	}

	return
}

// trans serializes a SetBucketStoragePolicyInput. The OBS signature uses the
// storageClass sub-resource and a <StorageClass> body; the non-OBS signature
// uses the storagePolicy sub-resource, a <StoragePolicy> body, and mapped
// storage-class names.
func (input SetBucketStoragePolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	if isObs {
		// Unknown classes fall back to Standard for the OBS form.
		if !IsContain(obsStorageClasses, string(input.StorageClass)) {
			input.StorageClass = StorageClassStandard
		}
		params = map[string]string{string(SubResourceStorageClass): ""}
		data = fmt.Sprintf("<StorageClass>%s</StorageClass>", input.StorageClass)
		return
	}
	storageClass := input.StorageClass
	switch storageClass {
	case "":
		storageClass = StorageClassStandard
	case StorageClassWarm:
		storageClass = storageClassStandardIA
	case StorageClassCold:
		storageClass = storageClassGlacier
	case StorageClassIntelligentTiering:
		doLog(LEVEL_WARN, "Intelligent tiering supports only OBS signature.")
	}
	params = map[string]string{string(SubResourceStoragePolicy): ""}
	data = fmt.Sprintf("<StoragePolicy><DefaultStorageClass>%s</DefaultStorageClass></StoragePolicy>", storageClass)
	return
}

// trans serializes a SetBucketQuotaInput as the quota sub-resource with an
// XML body.
func (input SetBucketQuotaInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	return trans(SubResourceQuota, input)
}

// trans serializes a SetBucketAclInput: a canned ACL becomes a header, while
// an explicit AccessControlPolicy becomes an XML body.
func (input SetBucketAclInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceAcl): ""}
	headers = make(map[string][]string)

	acl := string(input.ACL)
	if acl == "" {
		data, _ = convertBucketACLToXML(input.AccessControlPolicy, false, isObs)
		return
	}
	setHeaders(headers, HEADER_ACL, []string{acl}, isObs)
	return
}

// trans serializes a SetBucketPolicyInput: the policy JSON is streamed as the
// request body under the policy sub-resource.
func (input SetBucketPolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	data = strings.NewReader(input.Policy)
	params = map[string]string{string(SubResourcePolicy): ""}
	return
}

// trans serializes a SetBucketCorsInput into an XML body plus its integrity
// header (Content-MD5, or the SHA-256 variant when EnableSha256 is set).
func (input SetBucketCorsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceCors): ""}
	var checksum string
	data, checksum, err = ConvertRequestToIoReaderV2(input, input.EnableSha256)
	if err != nil {
		return
	}

	algorithm := HEADER_MD5_CAMEL
	if input.EnableSha256 {
		algorithm = HEADER_SHA256_CAMEL
	}
	headers = map[string][]string{algorithm: {checksum}}
	return
}

// trans serializes a SetBucketVersioningInput as the versioning sub-resource
// with an XML body.
func (input SetBucketVersioningInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	return trans(SubResourceVersioning, input)
}

// trans serializes a SetBucketWebsiteConfigurationInput: the website
// configuration is converted to XML under the website sub-resource.
func (input SetBucketWebsiteConfigurationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	data, _ = ConvertWebsiteConfigurationToXml(input.BucketWebsiteConfiguration, false)
	params = map[string]string{string(SubResourceWebsite): ""}
	return
}

// trans serializes a GetBucketMetadataInput: optional CORS preflight headers
// (Origin and Access-Control-Request-Headers) are attached when set.
func (input GetBucketMetadataInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	headers = make(map[string][]string)
	if value := strings.TrimSpace(input.Origin); value != "" {
		headers[HEADER_ORIGIN_CAMEL] = []string{value}
	}
	if value := strings.TrimSpace(input.RequestHeader); value != "" {
		headers[HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL] = []string{value}
	}
	return
}

// trans serializes a SetBucketLoggingConfigurationInput: the logging status
// is converted to XML under the logging sub-resource.
func (input SetBucketLoggingConfigurationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	data, _ = ConvertLoggingStatusToXml(input.BucketLoggingStatus, false, isObs)
	params = map[string]string{string(SubResourceLogging): ""}
	return
}

// trans serializes a SetBucketLifecycleConfigurationInput into an XML body
// plus its integrity header (MD5, or SHA-256 when EnableSha256 is set).
func (input SetBucketLifecycleConfigurationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceLifecycle): ""}

	body, checksum := ConvertLifecycleConfigurationToXml(input.BucketLifecycleConfiguration, true, isObs, input.EnableSha256)
	data = body

	algorithm := HEADER_MD5_CAMEL
	if input.EnableSha256 {
		algorithm = HEADER_SHA256_CAMEL
	}
	headers = map[string][]string{algorithm: {checksum}}
	return
}

// trans serializes a SetBucketEncryptionInput: the encryption configuration
// is converted to XML under the encryption sub-resource.
func (input SetBucketEncryptionInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	data, _ = ConvertEncryptionConfigurationToXml(input.BucketEncryptionConfiguration, false, isObs)
	params = map[string]string{string(SubResourceEncryption): ""}
	return
}

// trans serializes a SetBucketTaggingInput into an XML body plus its
// integrity header (MD5, or SHA-256 when EnableSha256 is set).
func (input SetBucketTaggingInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceTagging): ""}
	var checksum string
	data, checksum, err = ConvertRequestToIoReaderV2(input, input.EnableSha256)
	if err != nil {
		return
	}

	algorithm := HEADER_MD5_CAMEL
	if input.EnableSha256 {
		algorithm = HEADER_SHA256_CAMEL
	}
	headers = map[string][]string{algorithm: {checksum}}
	return
}

// trans serializes a SetBucketNotificationInput: the notification settings
// are converted to XML under the notification sub-resource.
func (input SetBucketNotificationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	data, _ = ConvertNotificationToXml(input.BucketNotification, false, isObs)
	params = map[string]string{string(SubResourceNotification): ""}
	return
}

// trans serializes a SetBucketRequestPaymentInput as the requestPayment
// sub-resource with an XML body.
func (input SetBucketRequestPaymentInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	return trans(SubResourceRequestPayment, input)
}

// trans serializes a SetBucketFetchPolicyInput: a JSON body with the JSON
// content type, plus the OEF marker header.
func (input SetBucketFetchPolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	// A plain index expression suffices; the discarded ", _" ok-form was
	// redundant (a missing key simply yields the zero value).
	contentType := mimeTypes["json"]
	headers = make(map[string][]string, 2)
	headers[HEADER_CONTENT_TYPE] = []string{contentType}
	setHeaders(headers, headerOefMarker, []string{"yes"}, isObs)
	data, err = convertFetchPolicyToJSON(input)
	return
}

// trans serializes a GetBucketFetchPolicyInput: only the OEF marker header
// is required.
func (input GetBucketFetchPolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	headers = make(map[string][]string, 1)
	setHeaders(headers, headerOefMarker, []string{"yes"}, isObs)
	return
}

// trans serializes a DeleteBucketFetchPolicyInput: only the OEF marker header
// is required.
func (input DeleteBucketFetchPolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	headers = make(map[string][]string, 1)
	setHeaders(headers, headerOefMarker, []string{"yes"}, isObs)
	return
}

// trans serializes a SetBucketFetchJobInput: a JSON body with the JSON
// content type, plus the OEF marker header.
func (input SetBucketFetchJobInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	// A plain index expression suffices; the discarded ", _" ok-form was
	// redundant (a missing key simply yields the zero value).
	contentType := mimeTypes["json"]
	headers = make(map[string][]string, 2)
	headers[HEADER_CONTENT_TYPE] = []string{contentType}
	setHeaders(headers, headerOefMarker, []string{"yes"}, isObs)
	data, err = convertFetchJobToJSON(input)
	return
}

// trans serializes a GetBucketFetchJobInput: only the OEF marker header is
// required.
func (input GetBucketFetchJobInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	headers = make(map[string][]string, 1)
	setHeaders(headers, headerOefMarker, []string{"yes"}, isObs)
	return
}

// trans serializes a SetBucketMirrorBackToSourceInput: the raw rules string
// is sent as a JSON body under the mirrorBackToSource sub-resource.
func (input SetBucketMirrorBackToSourceInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceMirrorBackToSource): ""}
	data = input.Rules
	headers = map[string][]string{HEADER_CONTENT_TYPE: {mimeTypes["json"]}}
	return
}

// trans serializes a DeleteBucketCustomDomainInput as the customdomain
// sub-resource with an XML body.
func (input DeleteBucketCustomDomainInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	return trans(SubResourceCustomDomain, input)
}

// handleDomainConfig validates a custom-domain certificate configuration and
// converts it into an XML body plus its Content-MD5 header.
func handleDomainConfig(customDomainConfiguration CustomDomainConfiguration) (headers map[string][]string, data interface{}, err error) {

	headers = make(map[string][]string)
	// An optional certificate id must be exactly CERT_ID_SIZE characters.
	if customDomainConfiguration.CertificateId != "" {
		err = validateLength(len(customDomainConfiguration.CertificateId), CERT_ID_SIZE, CERT_ID_SIZE, CERTIFICATE_FIELD_NAME)
		if err != nil {
			return headers, nil, err
		}
	}

	// The certificate name length must fall within the allowed range.
	err = validateLength(len(customDomainConfiguration.Name), MIN_CERTIFICATE_NAME_LENGTH, MAX_CERTIFICATE_NAME_LENGTH, NAME_LENGTH)
	if err != nil {
		return headers, nil, err
	}

	reader, md5, convertErr := ConvertRequestToIoReaderV2(customDomainConfiguration, false)
	if convertErr != nil {
		return headers, nil, convertErr
	}

	// Reject serialized bodies that exceed the maximum certificate XML size.
	readerLen, err := GetReaderLen(reader)
	if err != nil {
		return headers, nil, err
	}

	err = validateLength(int(readerLen), 0, MAX_CERT_XML_BODY_SIZE, XML_SIZE)
	if err != nil {
		return headers, nil, err
	}
	data = reader

	headers = map[string][]string{HEADER_MD5_CAMEL: {md5}}
	return
}

// trans serializes a SetBucketCustomDomainInput; when a certificate
// configuration is supplied it is validated and becomes the request body.
func (input SetBucketCustomDomainInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceCustomDomain): input.CustomDomain}
	if cfg := input.CustomDomainConfiguration; cfg != nil {
		headers, data, err = handleDomainConfig(*cfg)
		return
	}
	headers = make(map[string][]string)
	data = nil
	return
}

// trans serializes a PutBucketPublicAccessBlockInput as the
// publicAccessBlock sub-resource with an XML body.
func (input PutBucketPublicAccessBlockInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	return trans(SubResourcePublicAccessBlock, input)
}

+ 535
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/trait_object.go View File

@@ -0,0 +1,535 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"fmt"
"io"
"net/url"
"strconv"
"strings"
)

// setReadCloser attaches the response body stream to the output; the caller
// owns closing it.
func (output *GetObjectOutput) setReadCloser(body io.ReadCloser) {
	output.Body = body
}

// trans serializes the listing fields shared by object/version listings:
// prefix, delimiter, max-keys, encoding-type, and optional CORS preflight
// headers.
func (input ListObjsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = make(map[string]string)
	headers = make(map[string][]string)
	for key, value := range map[string]string{
		"prefix":        input.Prefix,
		"delimiter":     input.Delimiter,
		"encoding-type": input.EncodingType,
	} {
		if value != "" {
			params[key] = value
		}
	}
	if input.MaxKeys > 0 {
		params["max-keys"] = IntToString(input.MaxKeys)
	}
	if value := strings.TrimSpace(input.Origin); value != "" {
		headers[HEADER_ORIGIN_CAMEL] = []string{value}
	}
	if value := strings.TrimSpace(input.RequestHeader); value != "" {
		headers[HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL] = []string{value}
	}
	return
}

// trans serializes a ListObjectsInput: the shared listing fields plus the
// optional pagination marker.
func (input ListObjectsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	if params, headers, data, err = input.ListObjsInput.trans(isObs); err != nil {
		return
	}
	if marker := input.Marker; marker != "" {
		params["marker"] = marker
	}
	return
}

// trans serializes a ListPosixObjectsInput: the shared listing fields plus
// the optional pagination marker.
func (input ListPosixObjectsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	if params, headers, data, err = input.ListObjsInput.trans(isObs); err != nil {
		return
	}
	if marker := input.Marker; marker != "" {
		params["marker"] = marker
	}
	return
}

// trans serializes a ListVersionsInput: the shared listing fields, the
// versions sub-resource, and the key/version pagination markers.
func (input ListVersionsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	if params, headers, data, err = input.ListObjsInput.trans(isObs); err != nil {
		return
	}
	params[string(SubResourceVersions)] = ""
	if marker := input.KeyMarker; marker != "" {
		params["key-marker"] = marker
	}
	if marker := input.VersionIdMarker; marker != "" {
		params["version-id-marker"] = marker
	}
	return
}

// trans serializes a DeleteObjectInput: only the optional versionId query
// parameter is produced.
func (input DeleteObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = make(map[string]string)
	if versionID := input.VersionId; versionID != "" {
		params[PARAM_VERSION_ID] = versionID
	}
	return
}

// trans serializes a DeleteObjectsInput into the batch-delete XML body and
// its Content-MD5 header. When encoding-type is "url" each key is escaped
// before serialization.
func (input DeleteObjectsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceDelete): ""}
	if strings.EqualFold(input.EncodingType, "url") {
		for i := range input.Objects {
			input.Objects[i].Key = url.QueryEscape(input.Objects[i].Key)
		}
	}
	var md5 string
	data, md5 = convertDeleteObjectsToXML(input)
	headers = map[string][]string{HEADER_MD5_CAMEL: {md5}}
	return
}

// trans serializes a SetObjectAclInput: a canned ACL becomes a header, while
// an explicit AccessControlPolicy becomes an XML body.
func (input SetObjectAclInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceAcl): ""}
	if versionID := input.VersionId; versionID != "" {
		params[PARAM_VERSION_ID] = versionID
	}
	headers = make(map[string][]string)

	acl := string(input.ACL)
	if acl == "" {
		data, _ = ConvertAclToXml(input.AccessControlPolicy, false, isObs)
		return
	}
	setHeaders(headers, HEADER_ACL, []string{acl}, isObs)
	return
}

// trans serializes a GetObjectAclInput: the acl sub-resource plus the
// optional versionId query parameter.
func (input GetObjectAclInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceAcl): ""}
	if versionID := input.VersionId; versionID != "" {
		params[PARAM_VERSION_ID] = versionID
	}
	return
}

// trans serializes a RestoreObjectInput. The restore body format depends on
// the signature flavor: OBS uses its own XML conversion, the non-OBS path
// uses the generic XML reader conversion.
func (input RestoreObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceRestore): ""}
	if versionID := input.VersionId; versionID != "" {
		params[PARAM_VERSION_ID] = versionID
	}
	if isObs {
		data = ConventObsRestoreToXml(input)
		return
	}
	data, err = ConvertRequestToIoReader(input)
	return
}

// trans serializes a GetObjectMetadataInput: the optional versionId query
// parameter, CORS preflight headers, and SSE-C headers (KMS settings are not
// applicable, hence sseCOnly=true).
func (input GetObjectMetadataInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = make(map[string]string)
	headers = make(map[string][]string)
	if versionID := input.VersionId; versionID != "" {
		params[PARAM_VERSION_ID] = versionID
	}
	if origin := input.Origin; origin != "" {
		headers[HEADER_ORIGIN_CAMEL] = []string{origin}
	}
	if requestHeader := input.RequestHeader; requestHeader != "" {
		headers[HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL] = []string{requestHeader}
	}
	setSseHeader(headers, input.SseHeader, true, isObs)
	return
}

// prepareContentHeaders copies the content-describing fields into their
// camel-case request headers.
func (input SetObjectMetadataInput) prepareContentHeaders(headers map[string][]string) {
	pairs := []struct {
		header string
		value  string
	}{
		{HEADER_CACHE_CONTROL_CAMEL, input.CacheControl},
		{HEADER_CONTENT_DISPOSITION_CAMEL, input.ContentDisposition},
		{HEADER_CONTENT_ENCODING_CAMEL, input.ContentEncoding},
		{HEADER_CONTENT_LANGUAGE_CAMEL, input.ContentLanguage},
		{HEADER_CONTENT_TYPE_CAML, input.ContentType},
	}
	for _, p := range pairs {
		if p.value != "" {
			headers[p.header] = []string{p.value}
		}
	}
	// For backward compatibility Expires takes precedence; HttpExpires is
	// only used when Expires is unset.
	switch {
	case input.Expires != "":
		headers[HEADER_EXPIRES_CAMEL] = []string{input.Expires}
	case input.HttpExpires != "":
		headers[HEADER_EXPIRES_CAMEL] = []string{input.HttpExpires}
	}
}

// prepareStorageClass writes the storage-class header, translating OBS class
// names to their non-OBS equivalents when not using the OBS signature.
func (input SetObjectMetadataInput) prepareStorageClass(headers map[string][]string, isObs bool) {
	storageClass := string(input.StorageClass)
	if storageClass == "" {
		return
	}
	if !isObs {
		switch storageClass {
		case string(StorageClassWarm):
			storageClass = string(storageClassStandardIA)
		case string(StorageClassCold):
			storageClass = string(storageClassGlacier)
		case string(StorageClassIntelligentTiering):
			doLog(LEVEL_WARN, "Intelligent tiering supports only OBS signature.")
		}
	}
	setHeaders(headers, HEADER_STORAGE_CLASS2, []string{storageClass}, isObs)
}

// trans serializes a SetObjectMetadataInput into request params and headers.
// The metadata directive defaults to ReplaceNew when unspecified.
func (input SetObjectMetadataInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceMetadata): ""}
	if input.VersionId != "" {
		params[PARAM_VERSION_ID] = input.VersionId
	}
	headers = make(map[string][]string)

	// Reuse the converted directive instead of converting the field twice.
	if directive := string(input.MetadataDirective); directive != "" {
		setHeaders(headers, HEADER_METADATA_DIRECTIVE, []string{directive}, isObs)
	} else {
		setHeaders(headers, HEADER_METADATA_DIRECTIVE, []string{string(ReplaceNew)}, isObs)
	}

	input.prepareContentHeaders(headers)
	if input.WebsiteRedirectLocation != "" {
		setHeaders(headers, HEADER_WEBSITE_REDIRECT_LOCATION, []string{input.WebsiteRedirectLocation}, isObs)
	}
	input.prepareStorageClass(headers, isObs)
	// Ranging over a nil map is a no-op, so no explicit nil guard is needed.
	// Keys are trimmed before receiving the signature-specific meta prefix.
	for key, value := range input.Metadata {
		key = strings.TrimSpace(key)
		setHeadersNext(headers, HEADER_PREFIX_META_OBS+key, HEADER_PREFIX_META+key, []string{value}, isObs)
	}
	return
}

// prepareResponseParams copies the response-override fields into their query
// parameters.
func (input GetObjectInput) prepareResponseParams(params map[string]string) {
	overrides := []struct {
		param string
		value string
	}{
		{PARAM_RESPONSE_CACHE_CONTROL, input.ResponseCacheControl},
		{PARAM_RESPONSE_CONTENT_DISPOSITION, input.ResponseContentDisposition},
		{PARAM_RESPONSE_CONTENT_ENCODING, input.ResponseContentEncoding},
		{PARAM_RESPONSE_CONTENT_LANGUAGE, input.ResponseContentLanguage},
		{PARAM_RESPONSE_CONTENT_TYPE, input.ResponseContentType},
		{PARAM_RESPONSE_EXPIRES, input.ResponseExpires},
	}
	for _, o := range overrides {
		if o.value != "" {
			params[o.param] = o.value
		}
	}
}

// trans serializes a GetObjectInput: the metadata params/headers plus
// response overrides, image processing, range and conditional headers.
func (input GetObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params, headers, data, err = input.GetObjectMetadataInput.trans(isObs)
	if err != nil {
		return
	}
	input.prepareResponseParams(params)
	if input.ImageProcess != "" {
		params[PARAM_IMAGE_PROCESS] = input.ImageProcess
	}
	if input.RangeStart >= 0 && input.RangeEnd > input.RangeStart {
		headers[HEADER_RANGE] = []string{fmt.Sprintf("bytes=%d-%d", input.RangeStart, input.RangeEnd)}
	}
	// A non-empty Range string wins over RangeStart/RangeEnd: it is written
	// to the same header key afterwards and overwrites it.
	if input.Range != "" {
		headers[HEADER_RANGE] = []string{input.Range}
	}
	if input.AcceptEncoding != "" {
		headers[HEADER_ACCEPT_ENCODING] = []string{input.AcceptEncoding}
	}
	if input.IfMatch != "" {
		headers[HEADER_IF_MATCH] = []string{input.IfMatch}
	}
	if input.IfNoneMatch != "" {
		headers[HEADER_IF_NONE_MATCH] = []string{input.IfNoneMatch}
	}
	if !input.IfModifiedSince.IsZero() {
		headers[HEADER_IF_MODIFIED_SINCE] = []string{FormatUtcToRfc1123(input.IfModifiedSince)}
	}
	if !input.IfUnmodifiedSince.IsZero() {
		headers[HEADER_IF_UNMODIFIED_SINCE] = []string{FormatUtcToRfc1123(input.IfUnmodifiedSince)}
	}
	return
}

// prepareGrantHeaders writes the per-grantee ACL headers for object
// operations.
func (input ObjectOperationInput) prepareGrantHeaders(headers map[string][]string, isObs bool) {
	grants := []struct {
		id     string
		header string
	}{
		{input.GrantReadId, HEADER_GRANT_READ_OBS},
		{input.GrantReadAcpId, HEADER_GRANT_READ_ACP_OBS},
		{input.GrantWriteAcpId, HEADER_GRANT_WRITE_ACP_OBS},
		{input.GrantFullControlId, HEADER_GRANT_FULL_CONTROL_OBS},
	}
	for _, g := range grants {
		if g.id != "" {
			setHeaders(headers, g.header, []string{g.id}, isObs)
		}
	}
}

// trans serializes the fields shared by object-writing operations: ACL,
// grants, storage class, website redirect, SSE settings, expiry and custom
// metadata.
func (input ObjectOperationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	headers = make(map[string][]string)
	params = make(map[string]string)
	if acl := string(input.ACL); acl != "" {
		setHeaders(headers, HEADER_ACL, []string{acl}, isObs)
	}
	input.prepareGrantHeaders(headers, isObs)
	if storageClass := string(input.StorageClass); storageClass != "" {
		// The non-OBS signature uses different storage-class names; map the
		// OBS names onto them. Intelligent tiering has no non-OBS equivalent.
		if !isObs {
			if storageClass == string(StorageClassWarm) {
				storageClass = string(storageClassStandardIA)
			} else if storageClass == string(StorageClassCold) {
				storageClass = string(storageClassGlacier)
			} else if storageClass == string(StorageClassIntelligentTiering) {
				doLog(LEVEL_WARN, "Intelligent tiering supports only OBS signature.")
			}
		}
		setHeaders(headers, HEADER_STORAGE_CLASS2, []string{storageClass}, isObs)
	}
	if input.WebsiteRedirectLocation != "" {
		setHeaders(headers, HEADER_WEBSITE_REDIRECT_LOCATION, []string{input.WebsiteRedirectLocation}, isObs)

	}
	setSseHeader(headers, input.SseHeader, false, isObs)
	// The expires header is always emitted in OBS form (isObs forced true).
	if input.Expires != 0 {
		setHeaders(headers, HEADER_EXPIRES, []string{Int64ToString(input.Expires)}, true)
	}
	// Custom metadata keys are trimmed and then given the signature-specific
	// meta prefix.
	if input.Metadata != nil {
		for key, value := range input.Metadata {
			key = strings.TrimSpace(key)
			setHeadersNext(headers, HEADER_PREFIX_META_OBS+key, HEADER_PREFIX_META+key, []string{value}, isObs)
		}
	}
	return
}

// trans serializes a PutObjectBasicInput: the shared object-operation
// headers plus the content-describing and integrity headers.
func (input PutObjectBasicInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params, headers, data, err = input.ObjectOperationInput.trans(isObs)
	if err != nil {
		return
	}

	if input.ContentMD5 != "" {
		headers[HEADER_MD5_CAMEL] = []string{input.ContentMD5}
	}

	if input.ContentSHA256 != "" {
		setHeaders(headers, HEADER_SHA256, []string{input.ContentSHA256}, isObs)
	}

	// Content-Length is only sent when positive; zero/negative means unknown.
	if input.ContentLength > 0 {
		headers[HEADER_CONTENT_LENGTH_CAMEL] = []string{Int64ToString(input.ContentLength)}
	}
	if input.ContentType != "" {
		headers[HEADER_CONTENT_TYPE_CAML] = []string{input.ContentType}
	}
	if input.ContentEncoding != "" {
		headers[HEADER_CONTENT_ENCODING_CAMEL] = []string{input.ContentEncoding}
	}
	if input.CacheControl != "" {
		headers[HEADER_CACHE_CONTROL_CAMEL] = []string{input.CacheControl}
	}
	if input.ContentDisposition != "" {
		headers[HEADER_CONTENT_DISPOSITION_CAMEL] = []string{input.ContentDisposition}
	}
	if input.ContentLanguage != "" {
		headers[HEADER_CONTENT_LANGUAGE_CAMEL] = []string{input.ContentLanguage}
	}
	if input.HttpExpires != "" {
		headers[HEADER_EXPIRES_CAMEL] = []string{input.HttpExpires}
	}
	return
}

// trans serializes a PutObjectInput: the basic put fields plus the optional
// body stream.
func (input PutObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	if params, headers, data, err = input.PutObjectBasicInput.trans(isObs); err != nil {
		return
	}
	if body := input.Body; body != nil {
		data = body
	}
	return
}

// trans serializes an AppendObjectInput: the basic put fields plus the
// append sub-resource, the write position, and the optional body stream.
func (input AppendObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	if params, headers, data, err = input.PutObjectBasicInput.trans(isObs); err != nil {
		return
	}
	params[string(SubResourceAppend)] = ""
	params["position"] = strconv.FormatInt(input.Position, 10)
	if body := input.Body; body != nil {
		data = body
	}
	return
}

// trans serializes a ModifyObjectInput: the modify sub-resource, the write
// position, an optional Content-Length, and the optional body stream.
func (input ModifyObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{
		string(SubResourceModify): "",
		"position":                strconv.FormatInt(input.Position, 10),
	}
	headers = make(map[string][]string)
	if input.ContentLength > 0 {
		headers[HEADER_CONTENT_LENGTH_CAMEL] = []string{Int64ToString(input.ContentLength)}
	}
	if body := input.Body; body != nil {
		data = body
	}
	return
}

// prepareReplaceHeaders writes the replacement content headers used when the
// metadata directive is REPLACE.
func (input CopyObjectInput) prepareReplaceHeaders(headers map[string][]string) {
	replacements := []struct {
		header string
		value  string
	}{
		{HEADER_CONTENT_TYPE, input.ContentType},
		{HEADER_CACHE_CONTROL, input.CacheControl},
		{HEADER_CONTENT_DISPOSITION, input.ContentDisposition},
		{HEADER_CONTENT_ENCODING, input.ContentEncoding},
		{HEADER_CONTENT_LANGUAGE, input.ContentLanguage},
	}
	for _, r := range replacements {
		if r.value != "" {
			headers[r.header] = []string{r.value}
		}
	}
	// For backward compatibility Expires takes precedence; HttpExpires is
	// only used when Expires is unset.
	switch {
	case input.Expires != "":
		headers[HEADER_EXPIRES_CAMEL] = []string{input.Expires}
	case input.HttpExpires != "":
		headers[HEADER_EXPIRES_CAMEL] = []string{input.HttpExpires}
	}
}

// prepareCopySourceHeaders writes the conditional-copy headers that gate the
// copy on the source object's ETag and modification time.
func (input CopyObjectInput) prepareCopySourceHeaders(headers map[string][]string, isObs bool) {
	if etag := input.CopySourceIfMatch; etag != "" {
		setHeaders(headers, HEADER_COPY_SOURCE_IF_MATCH, []string{etag}, isObs)
	}
	if etag := input.CopySourceIfNoneMatch; etag != "" {
		setHeaders(headers, HEADER_COPY_SOURCE_IF_NONE_MATCH, []string{etag}, isObs)
	}
	if t := input.CopySourceIfModifiedSince; !t.IsZero() {
		setHeaders(headers, HEADER_COPY_SOURCE_IF_MODIFIED_SINCE, []string{FormatUtcToRfc1123(t)}, isObs)
	}
	if t := input.CopySourceIfUnmodifiedSince; !t.IsZero() {
		setHeaders(headers, HEADER_COPY_SOURCE_IF_UNMODIFIED_SINCE, []string{FormatUtcToRfc1123(t)}, isObs)
	}
}

// trans serializes a CopyObjectInput: the shared object headers plus the
// copy-source header, metadata directive, conditional-copy and source SSE-C
// headers.
func (input CopyObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params, headers, data, err = input.ObjectOperationInput.trans(isObs)
	if err != nil {
		return
	}

	// The source key is URL-encoded; an optional version id is carried inside
	// the copy-source value itself.
	var copySource string
	if input.CopySourceVersionId != "" {
		copySource = fmt.Sprintf("%s/%s?versionId=%s", input.CopySourceBucket, UrlEncode(input.CopySourceKey, false), input.CopySourceVersionId)
	} else {
		copySource = fmt.Sprintf("%s/%s", input.CopySourceBucket, UrlEncode(input.CopySourceKey, false))
	}
	setHeaders(headers, HEADER_COPY_SOURCE, []string{copySource}, isObs)

	if directive := string(input.MetadataDirective); directive != "" {
		setHeaders(headers, HEADER_METADATA_DIRECTIVE, []string{directive}, isObs)
	}

	// Replacement content headers only apply when metadata is being replaced.
	if input.MetadataDirective == ReplaceMetadata {
		input.prepareReplaceHeaders(headers)
	}

	input.prepareCopySourceHeaders(headers, isObs)
	// Only an SSE-C source header is honored here; other ISseHeader
	// implementations are ignored for the copy source.
	if input.SourceSseHeader != nil {
		if sseCHeader, ok := input.SourceSseHeader.(SseCHeader); ok {
			setHeaders(headers, HEADER_SSEC_COPY_SOURCE_ENCRYPTION, []string{sseCHeader.GetEncryption()}, isObs)
			setHeaders(headers, HEADER_SSEC_COPY_SOURCE_KEY, []string{sseCHeader.GetKey()}, isObs)
			setHeaders(headers, HEADER_SSEC_COPY_SOURCE_KEY_MD5, []string{sseCHeader.GetKeyMD5()}, isObs)
		}
	}
	if input.SuccessActionRedirect != "" {
		headers[HEADER_SUCCESS_ACTION_REDIRECT] = []string{input.SuccessActionRedirect}
	}
	return
}

// trans serializes a HeadObjectInput: only the optional versionId query
// parameter is produced.
func (input HeadObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = make(map[string]string)
	if versionID := input.VersionId; versionID != "" {
		params[PARAM_VERSION_ID] = versionID
	}
	return
}

// trans serializes a RenameFileInput: the rename sub-resource, the new key,
// and the optional request-payer header.
func (input RenameFileInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{
		string(SubResourceRename): "",
		"name":                    input.NewObjectKey,
	}
	headers = make(map[string][]string)
	if payer := string(input.RequestPayer); payer != "" {
		headers[HEADER_REQUEST_PAYER] = []string{payer}
	}
	return
}

// trans builds the request for renaming a folder: the rename sub-resource
// plus the target name, and the optional request-payer header.
func (input RenameFolderInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{
		string(SubResourceRename): "",
		"name":                    input.NewObjectKey,
	}
	headers = make(map[string][]string)
	if payer := string(input.RequestPayer); payer != "" {
		headers[HEADER_REQUEST_PAYER] = []string{payer}
	}
	return
}

// trans builds the request for setting the access label of a directory: the
// accesslabel sub-resource as a query parameter and a JSON body of the form
// {"accesslabel": <serialized label>}.
func (input SetDirAccesslabelInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceAccesslabel): ""}

	accesslabelJson, err := TransToJSON(input.Accesslabel)
	if err != nil {
		return
	}
	// Build the envelope directly; the previous slice+strings.Join dance
	// (which also shadowed the stdlib name "json") produced this same single
	// string.
	data = fmt.Sprintf("{\"accesslabel\": %s}", accesslabelJson)

	return
}

// trans builds the request for querying a directory access label; only the
// accesslabel sub-resource query parameter is needed.
func (input GetDirAccesslabelInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceAccesslabel): ""}
	return
}

// trans builds the request for deleting a directory access label; only the
// accesslabel sub-resource query parameter is needed.
func (input DeleteDirAccesslabelInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceAccesslabel): ""}
	return
}

+ 75
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/trait_other.go View File

@@ -0,0 +1,75 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"bytes"
"io"
"os"
"strings"
)

// partSlice orders multipart-upload parts by ascending part number; it
// implements sort.Interface.
type partSlice []Part

// Len reports the number of parts.
func (ps partSlice) Len() int {
	return len(ps)
}

// Less orders two parts by their part number.
func (ps partSlice) Less(i, j int) bool {
	return ps[i].PartNumber < ps[j].PartNumber
}

// Swap exchanges the parts at indexes i and j.
func (ps partSlice) Swap(i, j int) {
	ps[i], ps[j] = ps[j], ps[i]
}

type readerWrapper struct {
reader io.Reader
mark int64
totalCount int64
readedCount int64
}

func (rw *readerWrapper) seek(offset int64, whence int) (int64, error) {
if r, ok := rw.reader.(*strings.Reader); ok {
return r.Seek(offset, whence)
} else if r, ok := rw.reader.(*bytes.Reader); ok {
return r.Seek(offset, whence)
} else if r, ok := rw.reader.(*os.File); ok {
return r.Seek(offset, whence)
}
return offset, nil
}

func (rw *readerWrapper) Read(p []byte) (n int, err error) {
if rw.totalCount == 0 {
return 0, io.EOF
}
if rw.totalCount > 0 {
n, err = rw.reader.Read(p)
readedOnce := int64(n)
remainCount := rw.totalCount - rw.readedCount
if remainCount > readedOnce {
rw.readedCount += readedOnce
return n, err
}
rw.readedCount += remainCount
return int(remainCount), io.EOF
}
return rw.reader.Read(p)
}

// fileReaderWrapper is a readerWrapper that also remembers the path of the
// local file backing the reader.
type fileReaderWrapper struct {
	readerWrapper
	filePath string // path of the source file on disk
}

+ 142
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/trait_part.go View File

@@ -0,0 +1,142 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"fmt"
)

// trans builds the query parameters for listing multipart uploads; empty
// string filters and a non-positive max-uploads are omitted.
func (input ListMultipartUploadsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceUploads): ""}
	setIfPresent := func(name, value string) {
		if value != "" {
			params[name] = value
		}
	}
	setIfPresent("prefix", input.Prefix)
	setIfPresent("delimiter", input.Delimiter)
	if input.MaxUploads > 0 {
		params["max-uploads"] = IntToString(input.MaxUploads)
	}
	setIfPresent("key-marker", input.KeyMarker)
	setIfPresent("upload-id-marker", input.UploadIdMarker)
	setIfPresent("encoding-type", input.EncodingType)
	return
}

// trans builds the request for aborting a multipart upload; only the
// uploadId query parameter is needed.
func (input AbortMultipartUploadInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{"uploadId": input.UploadId}
	return
}

// trans converts InitiateMultipartUploadInput into request parameters,
// headers and body. It extends ObjectOperationInput.trans with the optional
// standard HTTP headers, the uploads sub-resource and the encoding type.
func (input InitiateMultipartUploadInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	if params, headers, data, err = input.ObjectOperationInput.trans(isObs); err != nil {
		return
	}
	// Optional standard HTTP headers; empty values are skipped.
	optionalHeaders := []struct{ name, value string }{
		{HEADER_CONTENT_TYPE_CAML, input.ContentType},
		{HEADER_CONTENT_ENCODING_CAMEL, input.ContentEncoding},
		{HEADER_CACHE_CONTROL_CAMEL, input.CacheControl},
		{HEADER_CONTENT_DISPOSITION_CAMEL, input.ContentDisposition},
		{HEADER_CONTENT_LANGUAGE_CAMEL, input.ContentLanguage},
		{HEADER_EXPIRES_CAMEL, input.HttpExpires},
	}
	for _, h := range optionalHeaders {
		if h.value != "" {
			headers[h.name] = []string{h.value}
		}
	}
	params[string(SubResourceUploads)] = ""
	if input.EncodingType != "" {
		params["encoding-type"] = input.EncodingType
	}
	return
}

// trans builds the request for uploading a single part: uploadId/partNumber
// query parameters, optional SSE and checksum headers, and the part body.
func (input UploadPartInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{
		"uploadId":   input.UploadId,
		"partNumber": IntToString(input.PartNumber),
	}
	headers = make(map[string][]string)
	setSseHeader(headers, input.SseHeader, true, isObs)
	if md5 := input.ContentMD5; md5 != "" {
		headers[HEADER_MD5_CAMEL] = []string{md5}
	}
	if sha256 := input.ContentSHA256; sha256 != "" {
		setHeaders(headers, HEADER_SHA256, []string{sha256}, isObs)
	}
	if input.Body != nil {
		data = input.Body
	}
	return
}

// trans builds the request for completing a multipart upload: the uploadId
// (and optional encoding-type) query parameters, with the part list
// serialized to XML as the request body.
func (input CompleteMultipartUploadInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{"uploadId": input.UploadId}
	if input.EncodingType != "" {
		params["encoding-type"] = input.EncodingType
	}
	// NOTE(review): the second return value of
	// ConvertCompleteMultipartUploadInputToXml is discarded here — confirm it
	// is not an error that should be propagated through err.
	data, _ = ConvertCompleteMultipartUploadInputToXml(input, false)
	return
}

// trans builds the query parameters for listing the parts of a multipart
// upload; non-positive numeric filters and an empty encoding type are
// omitted.
func (input ListPartsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{"uploadId": input.UploadId}
	setPositive := func(name string, value int) {
		if value > 0 {
			params[name] = IntToString(value)
		}
	}
	setPositive("max-parts", input.MaxParts)
	setPositive("part-number-marker", input.PartNumberMarker)
	if encoding := input.EncodingType; encoding != "" {
		params["encoding-type"] = encoding
	}
	return
}

// trans builds the request for copying a byte range of a source object into
// one part of a multipart upload.
func (input CopyPartInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{
		"uploadId":   input.UploadId,
		"partNumber": IntToString(input.PartNumber),
	}
	headers = make(map[string][]string, 1)

	// Copy source is "<bucket>/<escaped key>", optionally with a version id.
	source := fmt.Sprintf("%s/%s", input.CopySourceBucket, UrlEncode(input.CopySourceKey, false))
	if input.CopySourceVersionId != "" {
		source = fmt.Sprintf("%s?versionId=%s", source, input.CopySourceVersionId)
	}
	setHeaders(headers, HEADER_COPY_SOURCE, []string{source}, isObs)

	// Both forms target the same header; a literal CopySourceRange is
	// applied after the numeric start/end pair.
	if input.CopySourceRangeStart >= 0 && input.CopySourceRangeEnd > input.CopySourceRangeStart {
		rangeValue := fmt.Sprintf("bytes=%d-%d", input.CopySourceRangeStart, input.CopySourceRangeEnd)
		setHeaders(headers, HEADER_COPY_SOURCE_RANGE, []string{rangeValue}, isObs)
	}
	if input.CopySourceRange != "" {
		setHeaders(headers, HEADER_COPY_SOURCE_RANGE, []string{input.CopySourceRange}, isObs)
	}

	setSseHeader(headers, input.SseHeader, true, isObs)
	// SSE-C decryption headers for an encrypted copy source; a nil
	// SourceSseHeader simply fails the assertion.
	if sseCHeader, ok := input.SourceSseHeader.(SseCHeader); ok {
		setHeaders(headers, HEADER_SSEC_COPY_SOURCE_ENCRYPTION, []string{sseCHeader.GetEncryption()}, isObs)
		setHeaders(headers, HEADER_SSEC_COPY_SOURCE_KEY, []string{sseCHeader.GetKey()}, isObs)
		setHeaders(headers, HEADER_SSEC_COPY_SOURCE_KEY_MD5, []string{sseCHeader.GetKeyMD5()}, isObs)
	}
	return
}

+ 925
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/transfer.go View File

@@ -0,0 +1,925 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"bufio"
"encoding/xml"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sync"
"sync/atomic"
"syscall"
)

// errAbort signals that a concurrent transfer task was aborted and its
// result should be discarded.
var errAbort = errors.New("AbortError")

// FileStatus defines the upload file properties recorded in a checkpoint
// so a resumed upload can detect that the local file changed.
type FileStatus struct {
	XMLName xml.Name `xml:"FileInfo"`
	LastModified int64 `xml:"LastModified"` // local file mtime, Unix seconds
	Size int64 `xml:"Size"` // local file size in bytes
}

// UploadPartInfo defines the upload part properties tracked per part in the
// checkpoint file.
type UploadPartInfo struct {
	XMLName xml.Name `xml:"UploadPart"`
	PartNumber int `xml:"PartNumber"` // 1-based part index
	Etag string `xml:"Etag"` // ETag returned by the server for this part
	PartSize int64 `xml:"PartSize"` // size of this part in bytes
	Offset int64 `xml:"Offset"` // start offset within the source file
	IsCompleted bool `xml:"IsCompleted"` // true once this part uploaded successfully
}

// UploadCheckpoint defines the upload checkpoint file properties: enough
// state to resume a multipart upload after an interruption.
type UploadCheckpoint struct {
	XMLName xml.Name `xml:"UploadFileCheckpoint"`
	Bucket string `xml:"Bucket"` // destination bucket
	Key string `xml:"Key"` // destination object key
	UploadId string `xml:"UploadId,omitempty"` // multipart upload id being resumed
	UploadFile string `xml:"FileUrl"` // path of the local source file
	FileInfo FileStatus `xml:"FileInfo"` // size+mtime fingerprint of the source file
	UploadParts []UploadPartInfo `xml:"UploadParts>UploadPart"` // per-part progress
}

// isValid reports whether this checkpoint record still matches the pending
// upload (bucket/key/local file) and the current state of the file on disk.
// A false result tells the caller to discard the record and start over.
func (ufc *UploadCheckpoint) isValid(bucket, key, uploadFile string, fileStat os.FileInfo) bool {
	switch {
	case ufc.Bucket != bucket || ufc.Key != key || ufc.UploadFile != uploadFile:
		doLog(LEVEL_INFO, "Checkpoint file is invalid, the bucketName or objectKey or uploadFile was changed. clear the record.")
		return false
	case ufc.FileInfo.Size != fileStat.Size() || ufc.FileInfo.LastModified != fileStat.ModTime().Unix():
		doLog(LEVEL_INFO, "Checkpoint file is invalid, the uploadFile was changed. clear the record.")
		return false
	case ufc.UploadId == "":
		doLog(LEVEL_INFO, "UploadId is invalid. clear the record.")
		return false
	}
	return true
}

// uploadPartTask carries everything one routine-pool worker needs to upload
// a single part of a resumable upload.
type uploadPartTask struct {
	UploadPartInput
	obsClient *ObsClient
	abort *int32 // shared abort flag; 1 means sibling tasks should stop
	extensions []extensionOptions
	enableCheckpoint bool // when true, failures do not set the abort flag
}

// Run uploads one part. It returns *UploadPartOutput on success, errAbort
// when the shared abort flag was already set, and an error otherwise. A 4xx
// response — or a missing ETag while checkpointing is disabled — sets the
// abort flag so sibling tasks stop early.
func (task *uploadPartTask) Run() interface{} {
	// Another task already failed fatally; skip the work.
	if atomic.LoadInt32(task.abort) == 1 {
		return errAbort
	}

	// Rebuild a fresh UploadPartInput from the task's fields so the request
	// carries exactly the per-part values.
	input := &UploadPartInput{}
	input.Bucket = task.Bucket
	input.Key = task.Key
	input.PartNumber = task.PartNumber
	input.UploadId = task.UploadId
	input.SseHeader = task.SseHeader
	input.SourceFile = task.SourceFile
	input.Offset = task.Offset
	input.PartSize = task.PartSize
	input.ContentMD5 = task.ContentMD5
	input.ContentSHA256 = task.ContentSHA256

	extensions := task.extensions

	var output *UploadPartOutput
	var err error
	if len(extensions) != 0 {
		output, err = task.obsClient.UploadPart(input, extensions...)
	} else {
		output, err = task.obsClient.UploadPart(input)
	}

	if err == nil {
		// A success response without an ETag cannot be used to complete the
		// multipart upload, so treat it as a failure.
		if output.ETag == "" {
			doLog(LEVEL_WARN, "Get invalid etag value after uploading part [%d].", task.PartNumber)
			if !task.enableCheckpoint {
				atomic.CompareAndSwapInt32(task.abort, 0, 1)
				doLog(LEVEL_WARN, "Task is aborted, part number is [%d]", task.PartNumber)
			}
			return fmt.Errorf("get invalid etag value after uploading part [%d]", task.PartNumber)
		}
		return output
	} else if obsError, ok := err.(ObsError); ok && obsError.StatusCode >= 400 && obsError.StatusCode < 500 {
		// Client-side errors will not succeed on retry: abort the transfer.
		atomic.CompareAndSwapInt32(task.abort, 0, 1)
		doLog(LEVEL_WARN, "Task is aborted, part number is [%d]", task.PartNumber)
	}
	return err
}

func loadCheckpointFile(checkpointFile string, result interface{}) error {
ret, err := ioutil.ReadFile(checkpointFile)
if err != nil {
return err
}
if len(ret) == 0 {
return nil
}
return xml.Unmarshal(ret, result)
}

func updateCheckpointFile(fc interface{}, checkpointFilePath string) error {
result, err := xml.Marshal(fc)
if err != nil {
return err
}
err = ioutil.WriteFile(checkpointFilePath, result, 0640)
return err
}

// getCheckpointFile restores the upload checkpoint record from
// input.CheckpointFile into ufc. It returns needCheckpoint=true when a new
// record must be created (file missing, unreadable, or stale) and false when
// a valid record was restored. A stale record additionally aborts the
// remembered multipart upload and removes the checkpoint file.
func getCheckpointFile(ufc *UploadCheckpoint, uploadFileStat os.FileInfo, input *UploadFileInput, obsClient *ObsClient, extensions []extensionOptions) (needCheckpoint bool, err error) {
	checkpointFilePath := input.CheckpointFile
	checkpointFileStat, err := os.Stat(checkpointFilePath)
	if err != nil {
		// No checkpoint file yet: start a fresh upload.
		doLog(LEVEL_DEBUG, fmt.Sprintf("Stat checkpoint file failed with error: [%v].", err))
		return true, nil
	}
	if checkpointFileStat.IsDir() {
		doLog(LEVEL_ERROR, "Checkpoint file can not be a folder.")
		return false, errors.New("checkpoint file can not be a folder")
	}
	err = loadCheckpointFile(checkpointFilePath, ufc)
	if err != nil {
		// Unreadable record: ignore it and start fresh.
		doLog(LEVEL_WARN, fmt.Sprintf("Load checkpoint file failed with error: [%v].", err))
		return true, nil
	} else if !ufc.isValid(input.Bucket, input.Key, input.UploadFile, uploadFileStat) {
		// Stale record: abort the old multipart upload (best effort) and
		// delete the checkpoint file before starting over.
		if ufc.Bucket != "" && ufc.Key != "" && ufc.UploadId != "" {
			_err := abortTask(ufc.Bucket, ufc.Key, ufc.UploadId, obsClient, extensions)
			if _err != nil {
				doLog(LEVEL_WARN, "Failed to abort upload task [%s].", ufc.UploadId)
			}
		}
		_err := os.Remove(checkpointFilePath)
		if _err != nil {
			doLog(LEVEL_WARN, fmt.Sprintf("Failed to remove checkpoint file with error: [%v].", _err))
		}
	} else {
		// Valid record restored; resume from it.
		return false, nil
	}

	return true, nil
}

// prepareUpload initiates a new multipart upload, records the upload id and
// the local file's size+mtime fingerprint in ufc, and slices the file into
// upload parts.
func prepareUpload(ufc *UploadCheckpoint, uploadFileStat os.FileInfo, input *UploadFileInput, obsClient *ObsClient, extensions []extensionOptions) error {
	initiateInput := &InitiateMultipartUploadInput{}
	initiateInput.ObjectOperationInput = input.ObjectOperationInput
	initiateInput.HttpHeader = HttpHeader{
		CacheControl: input.CacheControl,
		ContentEncoding: input.ContentEncoding,
		ContentType: input.ContentType,
		ContentDisposition: input.ContentDisposition,
		ContentLanguage: input.ContentLanguage,
		HttpExpires: input.HttpExpires,
	}
	initiateInput.EncodingType = input.EncodingType
	var output *InitiateMultipartUploadOutput
	var err error
	if len(extensions) != 0 {
		output, err = obsClient.InitiateMultipartUpload(initiateInput, extensions...)
	} else {
		output, err = obsClient.InitiateMultipartUpload(initiateInput)
	}
	if err != nil {
		return err
	}

	// Fill the checkpoint record so the upload can be resumed later.
	ufc.Bucket = input.Bucket
	ufc.Key = input.Key
	ufc.UploadFile = input.UploadFile
	ufc.FileInfo = FileStatus{}
	ufc.FileInfo.Size = uploadFileStat.Size()
	ufc.FileInfo.LastModified = uploadFileStat.ModTime().Unix()
	ufc.UploadId = output.UploadId

	err = sliceFile(input.PartSize, ufc)
	return err
}

// sliceFile splits the file recorded in ufc into upload parts of partSize
// bytes. When that would exceed 10000 parts, partSize is enlarged to
// ceil(fileSize/10000) so the part count stays within the limit; the final
// part carries the remainder. An empty file yields a single zero-size part.
func sliceFile(partSize int64, ufc *UploadCheckpoint) error {
	fileSize := ufc.FileInfo.Size
	cnt := fileSize / partSize
	if cnt >= 10000 {
		// Too many parts: grow the part size to ceil(fileSize/10000).
		partSize = fileSize / 10000
		if fileSize%10000 != 0 {
			partSize++
		}
		cnt = fileSize / partSize
	}
	if fileSize%partSize != 0 {
		cnt++
	}

	if partSize > MAX_PART_SIZE {
		doLog(LEVEL_ERROR, "The source upload file is too large")
		return fmt.Errorf("The source upload file is too large")
	}

	if cnt == 0 {
		// Zero-byte file: still emit one part so the upload has work to do.
		uploadPart := UploadPartInfo{}
		uploadPart.PartNumber = 1
		ufc.UploadParts = []UploadPartInfo{uploadPart}
	} else {
		uploadParts := make([]UploadPartInfo, 0, cnt)
		var i int64
		for i = 0; i < cnt; i++ {
			uploadPart := UploadPartInfo{}
			uploadPart.PartNumber = int(i) + 1
			uploadPart.PartSize = partSize
			uploadPart.Offset = i * partSize
			uploadParts = append(uploadParts, uploadPart)
		}
		// Trim the last part to the remaining bytes.
		if value := fileSize % partSize; value != 0 {
			uploadParts[cnt-1].PartSize = value
		}
		ufc.UploadParts = uploadParts
	}
	return nil
}

// abortTask aborts the multipart upload identified by bucket/key/uploadID,
// forwarding any extension options to the client call.
func abortTask(bucket, key, uploadID string, obsClient *ObsClient, extensions []extensionOptions) error {
	input := &AbortMultipartUploadInput{}
	input.Bucket = bucket
	input.Key = key
	input.UploadId = uploadID
	var err error
	if len(extensions) == 0 {
		_, err = obsClient.AbortMultipartUpload(input)
	} else {
		_, err = obsClient.AbortMultipartUpload(input, extensions...)
	}
	return err
}

// handleUploadFileResult post-processes the concurrent part-upload phase:
// on failure with checkpointing disabled it aborts the multipart upload
// (best effort) before passing the error through; otherwise the error (or
// nil) is returned unchanged.
func handleUploadFileResult(uploadPartError error, ufc *UploadCheckpoint, enableCheckpoint bool, obsClient *ObsClient, extensions []extensionOptions) error {
	if uploadPartError == nil {
		return nil
	}
	if !enableCheckpoint {
		if _err := abortTask(ufc.Bucket, ufc.Key, ufc.UploadId, obsClient, extensions); _err != nil {
			doLog(LEVEL_WARN, "Failed to abort task [%s].", ufc.UploadId)
		}
	}
	return uploadPartError
}

// completeParts finishes the multipart upload from the checkpoint record.
// On success it removes the checkpoint file (when enabled); on failure with
// checkpointing disabled it aborts the upload (best effort).
func completeParts(ufc *UploadCheckpoint, enableCheckpoint bool, checkpointFilePath string, obsClient *ObsClient, encodingType string, extensions []extensionOptions) (output *CompleteMultipartUploadOutput, err error) {
	completeInput := &CompleteMultipartUploadInput{}
	completeInput.Bucket = ufc.Bucket
	completeInput.Key = ufc.Key
	completeInput.UploadId = ufc.UploadId
	completeInput.EncodingType = encodingType
	completeInput.Parts = make([]Part, 0, len(ufc.UploadParts))
	for _, uploadPart := range ufc.UploadParts {
		completeInput.Parts = append(completeInput.Parts, Part{
			PartNumber: uploadPart.PartNumber,
			ETag:       uploadPart.Etag,
		})
	}

	var completeOutput *CompleteMultipartUploadOutput
	if len(extensions) == 0 {
		completeOutput, err = obsClient.CompleteMultipartUpload(completeInput)
	} else {
		completeOutput, err = obsClient.CompleteMultipartUpload(completeInput, extensions...)
	}

	if err != nil {
		if !enableCheckpoint {
			if _err := abortTask(ufc.Bucket, ufc.Key, ufc.UploadId, obsClient, extensions); _err != nil {
				doLog(LEVEL_WARN, "Failed to abort task [%s].", ufc.UploadId)
			}
		}
		return completeOutput, err
	}
	if enableCheckpoint {
		if _err := os.Remove(checkpointFilePath); _err != nil {
			doLog(LEVEL_WARN, "Upload file successfully, but remove checkpoint file failed with error [%v].", _err)
		}
	}
	return completeOutput, err
}

// resumeUpload drives a checkpointable multipart upload of a local file: it
// validates the file, restores or creates the checkpoint record, uploads the
// parts concurrently and finally completes the multipart upload.
func (obsClient ObsClient) resumeUpload(input *UploadFileInput, extensions []extensionOptions) (output *CompleteMultipartUploadOutput, err error) {
	uploadFileStat, err := os.Stat(input.UploadFile)
	if err != nil {
		doLog(LEVEL_ERROR, fmt.Sprintf("Failed to stat uploadFile with error: [%v].", err))
		return nil, err
	}
	if uploadFileStat.IsDir() {
		doLog(LEVEL_ERROR, "UploadFile can not be a folder.")
		return nil, errors.New("uploadFile can not be a folder")
	}

	ufc := &UploadCheckpoint{}

	var needCheckpoint = true
	var checkpointFilePath = input.CheckpointFile
	var enableCheckpoint = input.EnableCheckpoint
	if enableCheckpoint {
		// Try to restore a previous record; needCheckpoint stays true when
		// the upload has to start from scratch.
		needCheckpoint, err = getCheckpointFile(ufc, uploadFileStat, input, &obsClient, extensions)
		if err != nil {
			return nil, err
		}
	}
	if needCheckpoint {
		err = prepareUpload(ufc, uploadFileStat, input, &obsClient, extensions)
		if err != nil {
			return nil, err
		}

		if enableCheckpoint {
			// Persist the fresh record; if that fails, abort the just-created
			// multipart upload so it does not leak on the server.
			err = updateCheckpointFile(ufc, checkpointFilePath)
			if err != nil {
				doLog(LEVEL_ERROR, "Failed to update checkpoint file with error [%v].", err)
				_err := abortTask(ufc.Bucket, ufc.Key, ufc.UploadId, &obsClient, extensions)
				if _err != nil {
					doLog(LEVEL_WARN, "Failed to abort task [%s].", ufc.UploadId)
				}
				return nil, err
			}
		}
	}

	uploadPartError := obsClient.uploadPartConcurrent(ufc, checkpointFilePath, input, extensions)
	err = handleUploadFileResult(uploadPartError, ufc, enableCheckpoint, &obsClient, extensions)
	if err != nil {
		return nil, err
	}

	completeOutput, err := completeParts(ufc, enableCheckpoint, checkpointFilePath, &obsClient, input.EncodingType, extensions)

	return completeOutput, err
}

// handleUploadTaskResult records the outcome of one part-upload task. On
// success it stores the ETag, marks the part complete, advances the progress
// counter and (optionally) rewrites the checkpoint file under the shared
// lock. An errAbort result is swallowed; any other error is returned.
func handleUploadTaskResult(result interface{}, ufc *UploadCheckpoint, partNum int, enableCheckpoint bool, checkpointFilePath string, lock *sync.Mutex, completedBytes *int64, listener ProgressListener) (err error) {
	uploadPartOutput, ok := result.(*UploadPartOutput)
	if !ok {
		if result != errAbort {
			if realErr, isErr := result.(error); isErr {
				err = realErr
			}
		}
		return
	}

	lock.Lock()
	defer lock.Unlock()
	part := &ufc.UploadParts[partNum-1]
	part.Etag = uploadPartOutput.ETag
	part.IsCompleted = true

	atomic.AddInt64(completedBytes, part.PartSize)

	event := newProgressEvent(TransferDataEvent, *completedBytes, ufc.FileInfo.Size)
	publishProgress(listener, event)

	if enableCheckpoint {
		if _err := updateCheckpointFile(ufc, checkpointFilePath); _err != nil {
			doLog(LEVEL_WARN, "Failed to update checkpoint file with error [%v].", _err)
		}
	}
	return
}

// uploadPartConcurrent uploads all incomplete parts in ufc through a routine
// pool of input.TaskNum workers, publishing progress events along the way.
// The first task error is captured and returned after all tasks finish; an
// aborted transfer stops scheduling further parts.
func (obsClient ObsClient) uploadPartConcurrent(ufc *UploadCheckpoint, checkpointFilePath string, input *UploadFileInput, extensions []extensionOptions) error {
	pool := NewRoutinePool(input.TaskNum, MAX_PART_NUM)
	var uploadPartError atomic.Value // first error seen; guarded by errFlag CAS
	var errFlag int32
	var abort int32 // shared abort flag, set by failing tasks
	lock := new(sync.Mutex)

	var completedBytes int64
	listener := obsClient.getProgressListener(extensions)
	totalBytes := ufc.FileInfo.Size
	event := newProgressEvent(TransferStartedEvent, 0, totalBytes)
	publishProgress(listener, event)

	for _, uploadPart := range ufc.UploadParts {
		if atomic.LoadInt32(&abort) == 1 {
			break
		}
		// Parts already uploaded in a previous run only contribute progress.
		if uploadPart.IsCompleted {
			atomic.AddInt64(&completedBytes, uploadPart.PartSize)
			event := newProgressEvent(TransferDataEvent, completedBytes, ufc.FileInfo.Size)
			publishProgress(listener, event)
			continue
		}
		task := uploadPartTask{
			UploadPartInput: UploadPartInput{
				Bucket: ufc.Bucket,
				Key: ufc.Key,
				PartNumber: uploadPart.PartNumber,
				UploadId: ufc.UploadId,
				SseHeader: input.SseHeader,
				SourceFile: input.UploadFile,
				Offset: uploadPart.Offset,
				PartSize: uploadPart.PartSize,
			},
			obsClient: &obsClient,
			abort: &abort,
			extensions: extensions,
			enableCheckpoint: input.EnableCheckpoint,
		}
		pool.ExecuteFunc(func() interface{} {
			result := task.Run()
			err := handleUploadTaskResult(result, ufc, task.PartNumber, input.EnableCheckpoint, input.CheckpointFile, lock, &completedBytes, listener)
			// Only the first error wins; later ones are dropped.
			if err != nil && atomic.CompareAndSwapInt32(&errFlag, 0, 1) {
				uploadPartError.Store(err)
			}
			return nil
		})
	}
	pool.ShutDown()
	if err, ok := uploadPartError.Load().(error); ok {

		event := newProgressEvent(TransferFailedEvent, completedBytes, ufc.FileInfo.Size)
		publishProgress(listener, event)

		return err
	}
	event = newProgressEvent(TransferCompletedEvent, completedBytes, ufc.FileInfo.Size)
	publishProgress(listener, event)
	return nil
}

// ObjectInfo defines download object info recorded in a checkpoint so a
// resumed download can detect that the remote object changed.
type ObjectInfo struct {
	XMLName xml.Name `xml:"ObjectInfo"`
	LastModified int64 `xml:"LastModified"` // object mtime, Unix seconds
	Size int64 `xml:"Size"` // object size in bytes
	ETag string `xml:"ETag"`
}

// TempFileInfo defines temp download file properties: where the staging
// ".tmp" file lives and the size it was pre-allocated to.
type TempFileInfo struct {
	XMLName xml.Name `xml:"TempFileInfo"`
	TempFileUrl string `xml:"TempFileUrl"` // path of the staging file
	Size int64 `xml:"Size"` // pre-allocated size in bytes
}

// DownloadPartInfo defines download part properties tracked per byte range
// in the checkpoint file.
type DownloadPartInfo struct {
	XMLName xml.Name `xml:"DownloadPart"`
	PartNumber int64 `xml:"PartNumber"` // 1-based part index
	RangeEnd int64 `xml:"RangeEnd"` // inclusive end offset of the range
	Offset int64 `xml:"Offset"` // start offset of the range
	IsCompleted bool `xml:"IsCompleted"` // true once the range landed in the temp file
}

// DownloadCheckpoint defines download checkpoint file properties: enough
// state to resume a ranged download after an interruption.
type DownloadCheckpoint struct {
	XMLName xml.Name `xml:"DownloadFileCheckpoint"`
	Bucket string `xml:"Bucket"` // source bucket
	Key string `xml:"Key"` // source object key
	VersionId string `xml:"VersionId,omitempty"` // optional object version
	DownloadFile string `xml:"FileUrl"` // final local destination path
	ObjectInfo ObjectInfo `xml:"ObjectInfo"` // remote-object fingerprint
	TempFileInfo TempFileInfo `xml:"TempFileInfo"` // staging file state
	DownloadParts []DownloadPartInfo `xml:"DownloadParts>DownloadPart"` // per-range progress
}

// isValid reports whether this checkpoint record still matches the requested
// download, the current remote object metadata and the temp file on disk. A
// false result tells the caller to discard the record and start over.
func (dfc *DownloadCheckpoint) isValid(input *DownloadFileInput, output *GetObjectMetadataOutput) bool {
	switch {
	case dfc.Bucket != input.Bucket || dfc.Key != input.Key || dfc.VersionId != input.VersionId || dfc.DownloadFile != input.DownloadFile:
		doLog(LEVEL_INFO, "Checkpoint file is invalid, the bucketName or objectKey or downloadFile was changed. clear the record.")
		return false
	case dfc.ObjectInfo.LastModified != output.LastModified.Unix() || dfc.ObjectInfo.ETag != output.ETag || dfc.ObjectInfo.Size != output.ContentLength:
		doLog(LEVEL_INFO, "Checkpoint file is invalid, the object info was changed. clear the record.")
		return false
	case dfc.TempFileInfo.Size != output.ContentLength:
		doLog(LEVEL_INFO, "Checkpoint file is invalid, size was changed. clear the record.")
		return false
	}
	if stat, statErr := os.Stat(dfc.TempFileInfo.TempFileUrl); statErr != nil || stat.Size() != dfc.ObjectInfo.Size {
		doLog(LEVEL_INFO, "Checkpoint file is invalid, the temp download file was changed. clear the record.")
		return false
	}

	return true
}

// downloadPartTask carries everything one routine-pool worker needs to
// download a single byte range into the shared temp file.
type downloadPartTask struct {
	GetObjectInput
	obsClient *ObsClient
	extensions []extensionOptions
	abort *int32 // shared abort flag; 1 means sibling tasks should stop
	partNumber int64 // 1-based index, used for logging
	tempFileURL string // staging file the range is written into
	enableCheckpoint bool // when true, failures do not set the abort flag
}

// Run downloads one byte range of the object into the shared temp file. It
// returns *GetObjectOutput on success, errAbort when the shared abort flag
// was already set, and an error otherwise. A 4xx response — or a write
// failure while checkpointing is disabled — sets the abort flag so sibling
// tasks stop early.
func (task *downloadPartTask) Run() interface{} {
	// Another task already failed fatally; skip the work.
	if atomic.LoadInt32(task.abort) == 1 {
		return errAbort
	}
	// Rebuild a fresh GetObjectInput carrying the task's range and
	// conditional headers.
	getObjectInput := &GetObjectInput{}
	getObjectInput.GetObjectMetadataInput = task.GetObjectMetadataInput
	getObjectInput.IfMatch = task.IfMatch
	getObjectInput.IfNoneMatch = task.IfNoneMatch
	getObjectInput.IfModifiedSince = task.IfModifiedSince
	getObjectInput.IfUnmodifiedSince = task.IfUnmodifiedSince
	getObjectInput.RangeStart = task.RangeStart
	getObjectInput.RangeEnd = task.RangeEnd
	getObjectInput.Range = fmt.Sprintf("bytes=%d-%d", getObjectInput.RangeStart, getObjectInput.RangeEnd)
	var output *GetObjectOutput
	var err error
	if len(task.extensions) != 0 {
		output, err = task.obsClient.GetObjectWithoutProgress(getObjectInput, task.extensions...)
	} else {
		output, err = task.obsClient.GetObjectWithoutProgress(getObjectInput)
	}

	if err == nil {
		defer func() {
			errMsg := output.Body.Close()
			if errMsg != nil {
				doLog(LEVEL_WARN, "Failed to close response body.")
			}
		}()
		// Write the response body into the temp file at the range offset.
		_err := updateDownloadFile(task.tempFileURL, task.RangeStart, output)
		if _err != nil {
			if !task.enableCheckpoint {
				atomic.CompareAndSwapInt32(task.abort, 0, 1)
				doLog(LEVEL_WARN, "Task is aborted, part number is [%d]", task.partNumber)
			}
			return _err
		}
		return output
	} else if obsError, ok := err.(ObsError); ok && obsError.StatusCode >= 400 && obsError.StatusCode < 500 {
		// Client-side errors will not succeed on retry: abort the transfer.
		atomic.CompareAndSwapInt32(task.abort, 0, 1)
		doLog(LEVEL_WARN, "Task is aborted, part number is [%d]", task.partNumber)
	}
	return err
}

// getObjectInfo fetches the object metadata that drives a resumable
// download, forwarding any extension options.
func getObjectInfo(input *DownloadFileInput, obsClient *ObsClient, extensions []extensionOptions) (getObjectmetaOutput *GetObjectMetadataOutput, err error) {
	if len(extensions) == 0 {
		return obsClient.GetObjectMetadata(&input.GetObjectMetadataInput)
	}
	return obsClient.GetObjectMetadata(&input.GetObjectMetadataInput, extensions...)
}

// getDownloadCheckpointFile restores the download checkpoint record from
// input.CheckpointFile into dfc. It returns needCheckpoint=true when a new
// record must be created (file missing, unreadable, or stale) and false when
// a valid record was restored. A stale record additionally removes the old
// temp file and the checkpoint file (both best effort).
func getDownloadCheckpointFile(dfc *DownloadCheckpoint, input *DownloadFileInput, output *GetObjectMetadataOutput) (needCheckpoint bool, err error) {
	checkpointFilePath := input.CheckpointFile
	checkpointFileStat, err := os.Stat(checkpointFilePath)
	if err != nil {
		// No checkpoint file yet: start a fresh download.
		doLog(LEVEL_DEBUG, fmt.Sprintf("Stat checkpoint file failed with error: [%v].", err))
		return true, nil
	}
	if checkpointFileStat.IsDir() {
		doLog(LEVEL_ERROR, "Checkpoint file can not be a folder.")
		return false, errors.New("checkpoint file can not be a folder")
	}
	err = loadCheckpointFile(checkpointFilePath, dfc)
	if err != nil {
		// Unreadable record: ignore it and start fresh.
		doLog(LEVEL_WARN, fmt.Sprintf("Load checkpoint file failed with error: [%v].", err))
		return true, nil
	} else if !dfc.isValid(input, output) {
		// Stale record: remove the stale temp and checkpoint files.
		if dfc.TempFileInfo.TempFileUrl != "" {
			_err := os.Remove(dfc.TempFileInfo.TempFileUrl)
			if _err != nil {
				doLog(LEVEL_WARN, "Failed to remove temp download file with error [%v].", _err)
			}
		}
		_err := os.Remove(checkpointFilePath)
		if _err != nil {
			doLog(LEVEL_WARN, "Failed to remove checkpoint file with error [%v].", _err)
		}
	} else {
		// Valid record restored; resume from it.
		return false, nil
	}

	return true, nil
}

// sliceObject splits an object of objectSize bytes into download ranges of
// partSize bytes each and records them on dfc. The final range is shortened
// to end at the object's last byte; a zero-size object still yields one
// (empty) part so the download pipeline has work to schedule.
func sliceObject(objectSize, partSize int64, dfc *DownloadCheckpoint) {
	count := objectSize / partSize
	if objectSize%partSize > 0 {
		count++
	}

	if count == 0 {
		dfc.DownloadParts = []DownloadPartInfo{{PartNumber: 1}}
		return
	}

	parts := make([]DownloadPartInfo, 0, count)
	for i := int64(0); i < count; i++ {
		parts = append(parts, DownloadPartInfo{
			PartNumber: i + 1,
			Offset:     i * partSize,
			RangeEnd:   (i+1)*partSize - 1,
		})
	}
	dfc.DownloadParts = parts
	// Clip the last range to the actual object size.
	if objectSize%partSize > 0 {
		dfc.DownloadParts[count-1].RangeEnd = dfc.ObjectInfo.Size - 1
	}
}

// createFile creates (or truncates) the temp download file and extends it to
// fileSize with Ftruncate so ranged writes can land at any offset.
// NOTE(review): this uses the syscall package directly — confirm it builds
// on every target platform; prepareTempFile falls back to
// os.OpenFile+WriteAt when this fails.
func createFile(tempFileURL string, fileSize int64) error {
	fd, err := syscall.Open(tempFileURL, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0640)
	if err != nil {
		doLog(LEVEL_WARN, "Failed to open temp download file [%s].", tempFileURL)
		return err
	}
	defer func() {
		errMsg := syscall.Close(fd)
		if errMsg != nil {
			doLog(LEVEL_WARN, "Failed to close file with error [%v].", errMsg)
		}
	}()
	err = syscall.Ftruncate(fd, fileSize)
	if err != nil {
		doLog(LEVEL_WARN, "Failed to create file with error [%v].", err)
	}
	return err
}

// prepareTempFile ensures the parent directory of tempFileURL exists and
// pre-sizes the temp file to fileSize. It first tries the syscall-based
// createFile and, if that fails, falls back to os.OpenFile plus a single
// WriteAt at the last byte to force allocation of the full length.
func prepareTempFile(tempFileURL string, fileSize int64) error {
	parentDir := filepath.Dir(tempFileURL)
	stat, err := os.Stat(parentDir)
	if err != nil {
		// Parent missing: create the whole directory chain.
		doLog(LEVEL_DEBUG, "Failed to stat path with error [%v].", err)
		_err := os.MkdirAll(parentDir, os.ModePerm)
		if _err != nil {
			doLog(LEVEL_ERROR, "Failed to make dir with error [%v].", _err)
			return _err
		}
	} else if !stat.IsDir() {
		doLog(LEVEL_ERROR, "Cannot create folder [%s] due to a same file exists.", parentDir)
		return fmt.Errorf("cannot create folder [%s] due to a same file exists", parentDir)
	}

	err = createFile(tempFileURL, fileSize)
	if err == nil {
		return nil
	}
	// Fallback path when the syscall-based creation failed.
	fd, err := os.OpenFile(tempFileURL, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0640)
	if err != nil {
		doLog(LEVEL_ERROR, "Failed to open temp download file [%s].", tempFileURL)
		return err
	}
	defer func() {
		errMsg := fd.Close()
		if errMsg != nil {
			doLog(LEVEL_WARN, "Failed to close file with error [%v].", errMsg)
		}
	}()
	if fileSize > 0 {
		// Writing one byte at fileSize-1 extends the file to fileSize.
		_, err = fd.WriteAt([]byte("a"), fileSize-1)
		if err != nil {
			doLog(LEVEL_ERROR, "Failed to create temp download file with error [%v].", err)
			return err
		}
	}

	return nil
}

// handleDownloadFileResult post-processes the concurrent download phase: on
// failure with checkpointing disabled it removes the temp file (best effort)
// before passing the error through; otherwise the error (or nil) is returned
// unchanged.
func handleDownloadFileResult(tempFileURL string, enableCheckpoint bool, downloadFileError error) error {
	if downloadFileError == nil {
		return nil
	}
	if !enableCheckpoint {
		if removeErr := os.Remove(tempFileURL); removeErr != nil {
			doLog(LEVEL_WARN, "Failed to remove temp download file with error [%v].", removeErr)
		}
	}
	return downloadFileError
}

// resumeDownload drives a checkpointable ranged download: it fetches the
// object metadata, restores or creates the checkpoint record and temp file,
// downloads the ranges concurrently, then renames the temp file into place.
func (obsClient ObsClient) resumeDownload(input *DownloadFileInput, extensions []extensionOptions) (output *GetObjectMetadataOutput, err error) {
	getObjectmetaOutput, err := getObjectInfo(input, &obsClient, extensions)
	if err != nil {
		return nil, err
	}

	objectSize := getObjectmetaOutput.ContentLength
	partSize := input.PartSize
	dfc := &DownloadCheckpoint{}

	var needCheckpoint = true
	var checkpointFilePath = input.CheckpointFile
	var enableCheckpoint = input.EnableCheckpoint
	if enableCheckpoint {
		// Try to restore a previous record; needCheckpoint stays true when
		// the download has to start from scratch.
		needCheckpoint, err = getDownloadCheckpointFile(dfc, input, getObjectmetaOutput)
		if err != nil {
			return nil, err
		}
	}

	if needCheckpoint {
		// Fill the checkpoint record with the remote-object fingerprint and
		// the temp-file location, then pre-allocate the temp file.
		dfc.Bucket = input.Bucket
		dfc.Key = input.Key
		dfc.VersionId = input.VersionId
		dfc.DownloadFile = input.DownloadFile
		dfc.ObjectInfo = ObjectInfo{}
		dfc.ObjectInfo.LastModified = getObjectmetaOutput.LastModified.Unix()
		dfc.ObjectInfo.Size = getObjectmetaOutput.ContentLength
		dfc.ObjectInfo.ETag = getObjectmetaOutput.ETag
		dfc.TempFileInfo = TempFileInfo{}
		dfc.TempFileInfo.TempFileUrl = input.DownloadFile + ".tmp"
		dfc.TempFileInfo.Size = getObjectmetaOutput.ContentLength

		sliceObject(objectSize, partSize, dfc)
		_err := prepareTempFile(dfc.TempFileInfo.TempFileUrl, dfc.TempFileInfo.Size)
		if _err != nil {
			return nil, _err
		}

		if enableCheckpoint {
			// Persist the fresh record; if that fails, clean up the temp file.
			_err := updateCheckpointFile(dfc, checkpointFilePath)
			if _err != nil {
				doLog(LEVEL_ERROR, "Failed to update checkpoint file with error [%v].", _err)
				_errMsg := os.Remove(dfc.TempFileInfo.TempFileUrl)
				if _errMsg != nil {
					doLog(LEVEL_WARN, "Failed to remove temp download file with error [%v].", _errMsg)
				}
				return nil, _err
			}
		}
	}

	downloadFileError := obsClient.downloadFileConcurrent(input, dfc, extensions)
	err = handleDownloadFileResult(dfc.TempFileInfo.TempFileUrl, enableCheckpoint, downloadFileError)
	if err != nil {
		return nil, err
	}

	// Atomically move the fully-written temp file to its final name.
	err = os.Rename(dfc.TempFileInfo.TempFileUrl, input.DownloadFile)
	if err != nil {
		doLog(LEVEL_ERROR, "Failed to rename temp download file [%s] to download file [%s] with error [%v].", dfc.TempFileInfo.TempFileUrl, input.DownloadFile, err)
		return nil, err
	}
	if enableCheckpoint {
		err = os.Remove(checkpointFilePath)
		if err != nil {
			doLog(LEVEL_WARN, "Download file successfully, but remove checkpoint file failed with error [%v].", err)
		}
	}

	return getObjectmetaOutput, nil
}

// updateDownloadFile copies the body of a ranged GetObject response into
// filePath starting at byte offset rangeStart, buffering writes through a
// 64 KiB bufio writer. Read and write failures are logged and returned.
func updateDownloadFile(filePath string, rangeStart int64, output *GetObjectOutput) error {
	file, err := os.OpenFile(filePath, os.O_WRONLY, 0640)
	if err != nil {
		doLog(LEVEL_ERROR, "Failed to open file [%s].", filePath)
		return err
	}
	// Close on every exit path; a failed close is only worth a warning.
	defer func() {
		if closeErr := file.Close(); closeErr != nil {
			doLog(LEVEL_WARN, "Failed to close file with error [%v].", closeErr)
		}
	}()
	if _, err = file.Seek(rangeStart, 0); err != nil {
		doLog(LEVEL_ERROR, "Failed to seek file with error [%v].", err)
		return err
	}
	writer := bufio.NewWriterSize(file, 65536)
	chunk := make([]byte, 8192)
	for {
		n, readErr := output.Body.Read(chunk)
		if n > 0 {
			written, writeErr := writer.Write(chunk[:n])
			if writeErr != nil {
				doLog(LEVEL_ERROR, "Failed to write to file with error [%v].", writeErr)
				return writeErr
			}
			if written != n {
				doLog(LEVEL_ERROR, "Failed to write to file [%s], expect: [%d], actual: [%d]", filePath, n, written)
				return fmt.Errorf("Failed to write to file [%s], expect: [%d], actual: [%d]", filePath, n, written)
			}
		}
		if readErr == io.EOF {
			break
		}
		if readErr != nil {
			doLog(LEVEL_ERROR, "Failed to read response body with error [%v].", readErr)
			return readErr
		}
	}
	if err = writer.Flush(); err != nil {
		doLog(LEVEL_ERROR, "Failed to flush file with error [%v].", err)
		return err
	}
	return nil
}

// handleDownloadTaskResult records the outcome of a single part-download task.
// On success (result is a *GetObjectOutput) it marks the part complete under
// lock, adds the part's byte count to completedBytes, publishes a progress
// event, and — when checkpointing is enabled — persists the checkpoint file;
// a checkpoint write failure is only warned about because the data itself was
// downloaded. Any non-abort error result is returned to the caller.
func handleDownloadTaskResult(result interface{}, dfc *DownloadCheckpoint, partNum int64, enableCheckpoint bool, checkpointFile string, lock *sync.Mutex, completedBytes *int64, listener ProgressListener) (err error) {
	if output, ok := result.(*GetObjectOutput); ok {
		lock.Lock()
		defer lock.Unlock()
		// Parts are numbered from 1; the slice is 0-based.
		dfc.DownloadParts[partNum-1].IsCompleted = true

		atomic.AddInt64(completedBytes, output.ContentLength)

		// Safe to read *completedBytes directly here: all mutations happen
		// while holding lock (presumably — confirmed for this call path).
		event := newProgressEvent(TransferDataEvent, *completedBytes, dfc.ObjectInfo.Size)
		publishProgress(listener, event)

		if enableCheckpoint {
			_err := updateCheckpointFile(dfc, checkpointFile)
			if _err != nil {
				doLog(LEVEL_WARN, "Failed to update checkpoint file with error [%v].", _err)
			}
		}
	} else if result != errAbort {
		// errAbort signals that another task already aborted the download;
		// only genuine errors are propagated.
		if _err, ok := result.(error); ok {
			err = _err
		}
	}
	return
}

// downloadFileConcurrent downloads every incomplete part recorded in dfc using
// a routine pool of input.TaskNum workers, publishing transfer progress events
// throughout. Parts already marked complete (restored from a checkpoint) only
// contribute their byte counts to the progress counter. The first task error
// is captured and returned after the pool drains; a failed or completed event
// is published accordingly.
func (obsClient ObsClient) downloadFileConcurrent(input *DownloadFileInput, dfc *DownloadCheckpoint, extensions []extensionOptions) error {
	pool := NewRoutinePool(input.TaskNum, MAX_PART_NUM)
	// downloadPartError holds only the FIRST error; errFlag guards the store.
	var downloadPartError atomic.Value
	var errFlag int32
	// abort is shared with every task; a task sets it to 1 to stop the rest.
	var abort int32
	lock := new(sync.Mutex)

	var completedBytes int64
	listener := obsClient.getProgressListener(extensions)
	totalBytes := dfc.ObjectInfo.Size
	event := newProgressEvent(TransferStartedEvent, 0, totalBytes)
	publishProgress(listener, event)

	for _, downloadPart := range dfc.DownloadParts {
		if atomic.LoadInt32(&abort) == 1 {
			break
		}
		if downloadPart.IsCompleted {
			// Checkpoint-restored part: count its bytes without re-downloading.
			atomic.AddInt64(&completedBytes, downloadPart.RangeEnd-downloadPart.Offset+1)
			event := newProgressEvent(TransferDataEvent, completedBytes, dfc.ObjectInfo.Size)
			publishProgress(listener, event)
			continue
		}
		// task is a fresh value per iteration, so the closure below captures
		// its own copy safely.
		task := downloadPartTask{
			GetObjectInput: GetObjectInput{
				GetObjectMetadataInput: input.GetObjectMetadataInput,
				IfMatch:                input.IfMatch,
				IfNoneMatch:            input.IfNoneMatch,
				IfUnmodifiedSince:      input.IfUnmodifiedSince,
				IfModifiedSince:        input.IfModifiedSince,
				RangeStart:             downloadPart.Offset,
				RangeEnd:               downloadPart.RangeEnd,
			},
			obsClient:        &obsClient,
			extensions:       extensions,
			abort:            &abort,
			partNumber:       downloadPart.PartNumber,
			tempFileURL:      dfc.TempFileInfo.TempFileUrl,
			enableCheckpoint: input.EnableCheckpoint,
		}
		pool.ExecuteFunc(func() interface{} {
			result := task.Run()
			err := handleDownloadTaskResult(result, dfc, task.partNumber, input.EnableCheckpoint, input.CheckpointFile, lock, &completedBytes, listener)
			if err != nil && atomic.CompareAndSwapInt32(&errFlag, 0, 1) {
				downloadPartError.Store(err)
			}
			return nil
		})
	}
	pool.ShutDown()
	if err, ok := downloadPartError.Load().(error); ok {
		event := newProgressEvent(TransferFailedEvent, completedBytes, dfc.ObjectInfo.Size)
		publishProgress(listener, event)
		return err
	}
	event = newProgressEvent(TransferCompletedEvent, completedBytes, dfc.ObjectInfo.Size)
	publishProgress(listener, event)
	return nil
}

+ 372
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/type.go View File

@@ -0,0 +1,372 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

// SignatureType defines type of signature
type SignatureType string

const (
	// SignatureV2 signature type v2
	SignatureV2 SignatureType = "v2"
	// SignatureV4 signature type v4
	SignatureV4 SignatureType = "v4"
	// SignatureObs signature type OBS
	SignatureObs SignatureType = "OBS"
)

// HttpMethodType defines http method type
type HttpMethodType string

const (
	// HttpMethodGet http method: GET
	HttpMethodGet HttpMethodType = HTTP_GET
	// HttpMethodPut http method: PUT
	HttpMethodPut HttpMethodType = HTTP_PUT
	// HttpMethodPost http method: POST
	HttpMethodPost HttpMethodType = HTTP_POST
	// HttpMethodDelete http method: DELETE
	HttpMethodDelete HttpMethodType = HTTP_DELETE
	// HttpMethodHead http method: HEAD
	HttpMethodHead HttpMethodType = HTTP_HEAD
	// HttpMethodOptions http method: OPTIONS
	HttpMethodOptions HttpMethodType = HTTP_OPTIONS
)

// SubResourceType defines the subResource value
type SubResourceType string

const (
	// SubResourceStoragePolicy subResource value: storagePolicy
	SubResourceStoragePolicy SubResourceType = "storagePolicy"

	// SubResourceStorageClass subResource value: storageClass
	SubResourceStorageClass SubResourceType = "storageClass"

	// SubResourceQuota subResource value: quota
	SubResourceQuota SubResourceType = "quota"

	// SubResourceStorageInfo subResource value: storageinfo
	SubResourceStorageInfo SubResourceType = "storageinfo"

	// SubResourceLocation subResource value: location
	SubResourceLocation SubResourceType = "location"

	// SubResourceAcl subResource value: acl
	SubResourceAcl SubResourceType = "acl"

	// SubResourcePolicy subResource value: policy
	SubResourcePolicy SubResourceType = "policy"

	// SubResourceCors subResource value: cors
	SubResourceCors SubResourceType = "cors"

	// SubResourceVersioning subResource value: versioning
	SubResourceVersioning SubResourceType = "versioning"

	// SubResourceWebsite subResource value: website
	SubResourceWebsite SubResourceType = "website"

	// SubResourceLogging subResource value: logging
	SubResourceLogging SubResourceType = "logging"

	// SubResourceLifecycle subResource value: lifecycle
	SubResourceLifecycle SubResourceType = "lifecycle"

	// SubResourceNotification subResource value: notification
	SubResourceNotification SubResourceType = "notification"

	// SubResourceEncryption subResource value: encryption
	SubResourceEncryption SubResourceType = "encryption"

	// SubResourceTagging subResource value: tagging
	SubResourceTagging SubResourceType = "tagging"

	// SubResourceDelete subResource value: delete
	SubResourceDelete SubResourceType = "delete"

	// SubResourceVersions subResource value: versions
	SubResourceVersions SubResourceType = "versions"

	// SubResourceUploads subResource value: uploads
	SubResourceUploads SubResourceType = "uploads"

	// SubResourceRestore subResource value: restore
	SubResourceRestore SubResourceType = "restore"

	// SubResourceMetadata subResource value: metadata
	SubResourceMetadata SubResourceType = "metadata"

	// SubResourceRequestPayment subResource value: requestPayment
	SubResourceRequestPayment SubResourceType = "requestPayment"

	// SubResourceAppend subResource value: append
	SubResourceAppend SubResourceType = "append"

	// SubResourceModify subResource value: modify
	SubResourceModify SubResourceType = "modify"

	// SubResourceRename subResource value: rename
	SubResourceRename SubResourceType = "rename"

	// SubResourceCustomDomain subResource value: customdomain
	SubResourceCustomDomain SubResourceType = "customdomain"

	// SubResourceMirrorBackToSource subResource value: mirrorBackToSource
	SubResourceMirrorBackToSource SubResourceType = "mirrorBackToSource"

	// SubResourceAccesslabel subResource value: x-obs-accesslabel
	SubResourceAccesslabel SubResourceType = "x-obs-accesslabel"

	// SubResourcePublicAccessBlock subResource value: publicAccessBlock
	SubResourcePublicAccessBlock SubResourceType = "publicAccessBlock"

	// SubResourceBucketPublicStatus subResource value: bucketStatus
	SubResourceBucketPublicStatus SubResourceType = "bucketStatus"

	// SubResourceBucketPolicyPublicStatus subResource value: policyStatus
	SubResourceBucketPolicyPublicStatus SubResourceType = "policyStatus"
)

// objectKeyType defines the objectKey value
type objectKeyType string

const (
	// objectKeyExtensionPolicy objectKey value: v1/extension_policy
	objectKeyExtensionPolicy objectKeyType = "v1/extension_policy"

	// objectKeyAsyncFetchJob objectKey value: v1/async-fetch/jobs
	objectKeyAsyncFetchJob objectKeyType = "v1/async-fetch/jobs"
)

// AclType defines bucket/object acl type
type AclType string

const (
	// AclPrivate acl type: private
	AclPrivate AclType = "private"
	// AclPublicRead acl type: public-read
	AclPublicRead AclType = "public-read"
	// AclPublicReadWrite acl type: public-read-write
	AclPublicReadWrite AclType = "public-read-write"
	// AclAuthenticatedRead acl type: authenticated-read
	AclAuthenticatedRead AclType = "authenticated-read"
	// AclBucketOwnerRead acl type: bucket-owner-read
	AclBucketOwnerRead AclType = "bucket-owner-read"
	// AclBucketOwnerFullControl acl type: bucket-owner-full-control
	AclBucketOwnerFullControl AclType = "bucket-owner-full-control"
	// AclLogDeliveryWrite acl type: log-delivery-write
	AclLogDeliveryWrite AclType = "log-delivery-write"
	// AclPublicReadDelivery acl type: public-read-delivered
	AclPublicReadDelivery AclType = "public-read-delivered"
	// AclPublicReadWriteDelivery acl type: public-read-write-delivered
	AclPublicReadWriteDelivery AclType = "public-read-write-delivered"
)

// StorageClassType defines bucket storage class
type StorageClassType string

const (
	// StorageClassStandard storage class: STANDARD
	StorageClassStandard StorageClassType = "STANDARD"

	// StorageClassWarm storage class: WARM
	StorageClassWarm StorageClassType = "WARM"

	// StorageClassCold storage class: COLD
	StorageClassCold StorageClassType = "COLD"

	// StorageClassDeepArchive storage class: DEEP_ARCHIVE
	StorageClassDeepArchive StorageClassType = "DEEP_ARCHIVE"

	// StorageClassIntelligentTiering storage class: INTELLIGENT_TIERING
	StorageClassIntelligentTiering StorageClassType = "INTELLIGENT_TIERING"

	// storageClassStandardIA and storageClassGlacier are unexported aliases
	// kept for compatibility with S3-style class names.
	storageClassStandardIA StorageClassType = "STANDARD_IA"
	storageClassGlacier    StorageClassType = "GLACIER"
)

// PermissionType defines permission type
type PermissionType string

const (
	// PermissionRead permission type: READ
	PermissionRead PermissionType = "READ"

	// PermissionWrite permission type: WRITE
	PermissionWrite PermissionType = "WRITE"

	// PermissionReadAcp permission type: READ_ACP
	PermissionReadAcp PermissionType = "READ_ACP"

	// PermissionWriteAcp permission type: WRITE_ACP
	PermissionWriteAcp PermissionType = "WRITE_ACP"

	// PermissionFullControl permission type: FULL_CONTROL
	PermissionFullControl PermissionType = "FULL_CONTROL"
)

// GranteeType defines grantee type
type GranteeType string

const (
	// GranteeGroup grantee type: Group
	GranteeGroup GranteeType = "Group"

	// GranteeUser grantee type: CanonicalUser
	GranteeUser GranteeType = "CanonicalUser"
)

// GroupUriType defines grantee uri type
type GroupUriType string

const (
	// GroupAllUsers grantee uri type: AllUsers
	GroupAllUsers GroupUriType = "AllUsers"

	// GroupAuthenticatedUsers grantee uri type: AuthenticatedUsers
	GroupAuthenticatedUsers GroupUriType = "AuthenticatedUsers"

	// GroupLogDelivery grantee uri type: LogDelivery
	GroupLogDelivery GroupUriType = "LogDelivery"
)

// VersioningStatusType defines bucket version status
type VersioningStatusType string

const (
	// VersioningStatusEnabled version status: Enabled
	VersioningStatusEnabled VersioningStatusType = "Enabled"

	// VersioningStatusSuspended version status: Suspended
	VersioningStatusSuspended VersioningStatusType = "Suspended"
)

// ProtocolType defines protocol type
type ProtocolType string

const (
	// ProtocolHttp protocol type: http
	ProtocolHttp ProtocolType = "http"

	// ProtocolHttps protocol type: https
	ProtocolHttps ProtocolType = "https"
)

// RuleStatusType defines lifeCycle rule status
type RuleStatusType string

const (
	// RuleStatusEnabled rule status: Enabled
	RuleStatusEnabled RuleStatusType = "Enabled"

	// RuleStatusDisabled rule status: Disabled
	RuleStatusDisabled RuleStatusType = "Disabled"
)

// RestoreTierType defines restore options
type RestoreTierType string

const (
	// RestoreTierExpedited restore options: Expedited
	RestoreTierExpedited RestoreTierType = "Expedited"

	// RestoreTierStandard restore options: Standard
	RestoreTierStandard RestoreTierType = "Standard"

	// RestoreTierBulk restore options: Bulk
	RestoreTierBulk RestoreTierType = "Bulk"
)

// MetadataDirectiveType defines metadata operation indicator
type MetadataDirectiveType string

const (
	// CopyMetadata metadata operation: COPY
	CopyMetadata MetadataDirectiveType = "COPY"

	// ReplaceNew metadata operation: REPLACE_NEW
	ReplaceNew MetadataDirectiveType = "REPLACE_NEW"

	// ReplaceMetadata metadata operation: REPLACE
	ReplaceMetadata MetadataDirectiveType = "REPLACE"
)

// EventType defines bucket notification type of events
type EventType string

const (
	// ObjectCreatedAll type of events: ObjectCreated:*
	ObjectCreatedAll EventType = "ObjectCreated:*"

	// ObjectCreatedPut type of events: ObjectCreated:Put
	ObjectCreatedPut EventType = "ObjectCreated:Put"

	// ObjectCreatedPost type of events: ObjectCreated:Post
	ObjectCreatedPost EventType = "ObjectCreated:Post"

	// ObjectCreatedCopy type of events: ObjectCreated:Copy
	ObjectCreatedCopy EventType = "ObjectCreated:Copy"

	// ObjectCreatedCompleteMultipartUpload type of events: ObjectCreated:CompleteMultipartUpload
	ObjectCreatedCompleteMultipartUpload EventType = "ObjectCreated:CompleteMultipartUpload"

	// ObjectRemovedAll type of events: ObjectRemoved:*
	ObjectRemovedAll EventType = "ObjectRemoved:*"

	// ObjectRemovedDelete type of events: ObjectRemoved:Delete
	ObjectRemovedDelete EventType = "ObjectRemoved:Delete"

	// ObjectRemovedDeleteMarkerCreated type of events: ObjectRemoved:DeleteMarkerCreated
	ObjectRemovedDeleteMarkerCreated EventType = "ObjectRemoved:DeleteMarkerCreated"
)

// PayerType defines type of payer
type PayerType string

const (
	// BucketOwnerPayer type of payer: BucketOwner
	BucketOwnerPayer PayerType = "BucketOwner"

	// RequesterPayer type of payer: Requester
	RequesterPayer PayerType = "Requester"

	// Requester header for requester-Pays
	Requester PayerType = "requester"
)

// FetchPolicyStatusType defines type of fetch policy status
type FetchPolicyStatusType string

const (
	// FetchStatusOpen type of status: open
	FetchStatusOpen FetchPolicyStatusType = "open"

	// FetchStatusClosed type of status: closed
	FetchStatusClosed FetchPolicyStatusType = "closed"
)

// AvailableZoneType defines type of az redundancy
type AvailableZoneType string

const (
	// AvailableZoneMultiAz az redundancy: 3az
	AvailableZoneMultiAz AvailableZoneType = "3az"
)

// FSStatusType defines type of file system status
type FSStatusType string

const (
	// FSStatusEnabled file system status: Enabled
	FSStatusEnabled FSStatusType = "Enabled"
	// FSStatusDisabled file system status: Disabled
	FSStatusDisabled FSStatusType = "Disabled"
)

// BucketType defines type of bucket
type BucketType string

const (
	// OBJECT bucket type: OBJECT (object storage bucket)
	OBJECT BucketType = "OBJECT"
	// POSIX bucket type: POSIX (parallel file system bucket)
	POSIX BucketType = "POSIX"
)

// BucketRedundancyType defines type of bucket redundancy
type BucketRedundancyType string

const (
	// BucketRedundancyClassic redundancy type: CLASSIC
	BucketRedundancyClassic BucketRedundancyType = "CLASSIC"
	// BucketRedundancyFusion redundancy type: FUSION
	BucketRedundancyFusion BucketRedundancyType = "FUSION"
)

+ 661
- 0
vendor/github.com/huaweicloud/huaweicloud-sdk-go-obs/obs/util.go View File

@@ -0,0 +1,661 @@
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

import (
"bytes"
"crypto/hmac"
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"encoding/json"
"encoding/xml"
"fmt"
"io"
"net/http"
"net/url"
"os"
"regexp"
"strconv"
"strings"
"time"
)

// regex matches a single CJK unified ideograph; used by UrlEncode when only
// Chinese characters must be escaped.
var regex = regexp.MustCompile("^[\u4e00-\u9fa5]$")

// ipRegex matches a dotted-quad IPv4 address.
var ipRegex = regexp.MustCompile("^((2[0-4]\\d|25[0-5]|[01]?\\d\\d?)\\.){3}(2[0-4]\\d|25[0-5]|[01]?\\d\\d?)$")

// v4AuthRegex extracts the Credential and SignedHeaders fields from a V4 Authorization header.
var v4AuthRegex = regexp.MustCompile("Credential=(.+?),SignedHeaders=(.+?),Signature=.+")

// regionRegex extracts the region segment from a V4 credential scope (ak/date/region/service/...).
var regionRegex = regexp.MustCompile(".+/\\d+/(.+?)/.+")

// StringContains returns src with every occurrence of subStr replaced by
// subTranscoding. (The name is historical; it performs replacement.)
func StringContains(src string, subStr string, subTranscoding string) string {
	return strings.ReplaceAll(src, subStr, subTranscoding)
}

// XmlTranscoding escapes the five XML special characters in src (&, <, >, ', ")
// with their entity references. The ampersand is replaced first so that
// entities produced by the later replacements are not double-escaped.
func XmlTranscoding(src string) string {
	escaped := src
	for _, pair := range [][2]string{
		{"&", "&amp;"},
		{"<", "&lt;"},
		{">", "&gt;"},
		{"'", "&apos;"},
		{"\"", "&quot;"},
	} {
		escaped = strings.Replace(escaped, pair[0], pair[1], -1)
	}
	return escaped
}

// HandleHttpResponse parses resp into output. Callback-style responses (see
// IsHandleCallbackResponse) go through the callback parser; everything else
// through the generic base-model parser. Parse failures are logged at WARN
// and returned to the caller.
func HandleHttpResponse(action string, headers map[string][]string, output IBaseModel, resp *http.Response, xmlResult bool, isObs bool) (err error) {
	if IsHandleCallbackResponse(action, headers, isObs) {
		if err = ParseCallbackResponseToBaseModel(resp, output, isObs); err != nil {
			doLog(LEVEL_WARN, "Parse callback response to BaseModel with error: %v", err)
		}
	} else {
		if err = ParseResponseToBaseModel(resp, output, xmlResult, isObs); err != nil {
			doLog(LEVEL_WARN, "Parse response to BaseModel with error: %v", err)
		}
	}
	return
}

// IsHandleCallbackResponse reports whether the response of action must be
// parsed as a callback response: the request carried a callback header
// (prefix depends on isObs) and the action is one that supports callbacks
// (PutObject, PutFile, CompleteMultipartUpload).
func IsHandleCallbackResponse(action string, headers map[string][]string, isObs bool) bool {
	// Idiom fix: compare booleans directly instead of `isObs == true`.
	headerPrefix := HEADER_PREFIX
	if isObs {
		headerPrefix = HEADER_PREFIX_OBS
	}
	supportCallbackActions := []string{PUT_OBJECT, PUT_FILE, "CompleteMultipartUpload"}
	return len(headers[headerPrefix+CALLBACK]) != 0 && IsContain(supportCallbackActions, action)
}

// IsContain reports whether item appears in items.
func IsContain(items []string, item string) bool {
	for i := range items {
		if items[i] == item {
			return true
		}
	}
	return false
}

// StringToInt parses value as a decimal int, falling back to def when
// parsing fails.
func StringToInt(value string, def int) int {
	if parsed, err := strconv.Atoi(value); err == nil {
		return parsed
	}
	return def
}

// StringToInt64 parses value as a decimal int64, falling back to def when
// parsing fails (including out-of-range input).
func StringToInt64(value string, def int64) int64 {
	if parsed, err := strconv.ParseInt(value, 10, 64); err == nil {
		return parsed
	}
	return def
}

// IntToString formats value in base 10.
func IntToString(value int) string {
	return strconv.FormatInt(int64(value), 10)
}

// Int64ToString converts int64 value to its base-10 string representation.
func Int64ToString(value int64) string {
	return strconv.FormatInt(value, 10)
}

// GetCurrentTimestamp gets unix time in milliseconds
// (nanoseconds divided by 1e6).
func GetCurrentTimestamp() int64 {
	return time.Now().UnixNano() / 1000000
}

// FormatUtcNow gets a textual representation of the current UTC time using
// the given reference-time layout.
func FormatUtcNow(format string) string {
	return time.Now().UTC().Format(format)
}

// FormatNowWithLoc gets a textual representation of the current time in the
// given location using the given reference-time layout.
func FormatNowWithLoc(format string, loc *time.Location) string {
	return time.Now().In(loc).Format(format)
}

// FormatUtcToRfc1123 gets a textual representation of the RFC1123 format time value
func FormatUtcToRfc1123(t time.Time) string {
ret := t.UTC().Format(time.RFC1123)
return ret[:strings.LastIndex(ret, "UTC")] + "GMT"
}

// Md5 returns the 16-byte MD5 digest of value.
//
// Uses the one-shot md5.Sum instead of a streaming hasher: hash.Hash.Write
// is documented never to return an error, so the previous error-logging
// branch was dead code.
func Md5(value []byte) []byte {
	sum := md5.Sum(value)
	return sum[:]
}

// HmacSha1 returns the HMAC-SHA1 of value keyed with key.
//
// The Write error is deliberately ignored: hash.Hash.Write (which hmac
// implements) never returns an error, so the previous logging branch was
// dead code.
func HmacSha1(key, value []byte) []byte {
	mac := hmac.New(sha1.New, key)
	_, _ = mac.Write(value)
	return mac.Sum(nil)
}

// HmacSha256 returns the HMAC-SHA256 of value keyed with key.
//
// The Write error is deliberately ignored: hash.Hash.Write never returns an
// error, so the previous logging branch was dead code.
func HmacSha256(key, value []byte) []byte {
	mac := hmac.New(sha256.New, key)
	_, _ = mac.Write(value)
	return mac.Sum(nil)
}

// Base64Encode wrapper of base64.StdEncoding.EncodeToString.
func Base64Encode(value []byte) string {
	return base64.StdEncoding.EncodeToString(value)
}

// Base64Decode wrapper of base64.StdEncoding.DecodeString.
func Base64Decode(value string) ([]byte, error) {
	return base64.StdEncoding.DecodeString(value)
}

// HexMd5 returns the md5 digest of value in hexadecimal format.
func HexMd5(value []byte) string {
	return Hex(Md5(value))
}

// Base64Md5 returns the md5 digest of value, base64-encoded (the form the
// Content-MD5 header expects).
func Base64Md5(value []byte) string {
	return Base64Encode(Md5(value))
}

// Base64Md5OrSha256 returns the base64-encoded sha256 digest of value when
// enableSha256 is set, otherwise the base64-encoded md5 digest.
func Base64Md5OrSha256(value []byte, enableSha256 bool) string {
	if enableSha256 {
		return Base64Sha256(value)
	}
	return Base64Md5(value)
}

// Base64Sha256 returns the sha256 digest of value, base64-encoded.
func Base64Sha256(value []byte) string {
	return Base64Encode(Sha256Hash(value))
}

// Sha256Hash returns the 32-byte SHA-256 digest of value.
//
// Uses the one-shot sha256.Sum256 instead of a streaming hasher:
// hash.Hash.Write is documented never to return an error, so the previous
// error-logging branch was dead code.
func Sha256Hash(value []byte) []byte {
	sum := sha256.Sum256(value)
	return sum[:]
}

// ParseXml wrapper of xml.Unmarshal; empty input is treated as a successful
// no-op rather than an error.
func ParseXml(value []byte, result interface{}) error {
	if len(value) == 0 {
		return nil
	}
	return xml.Unmarshal(value, result)
}

// parseJSON wrapper of json.Unmarshal; empty input is treated as a successful
// no-op rather than an error.
func parseJSON(value []byte, result interface{}) error {
	if len(value) == 0 {
		return nil
	}
	return json.Unmarshal(value, result)
}

// TransToXml marshals value to XML; a nil value yields an empty byte slice
// and no error.
func TransToXml(value interface{}) ([]byte, error) {
	if value != nil {
		return xml.Marshal(value)
	}
	return []byte{}, nil
}

// TransToJSON marshals value to JSON; a nil value yields an empty byte slice
// and no error.
func TransToJSON(value interface{}) ([]byte, error) {
	if value != nil {
		return json.Marshal(value)
	}
	return []byte{}, nil
}

// Hex wrapper of hex.EncodeToString.
func Hex(value []byte) string {
	return hex.EncodeToString(value)
}

// HexSha256 returns the sha256 digest of value in hexadecimal format.
func HexSha256(value []byte) string {
	return Hex(Sha256Hash(value))
}

// UrlDecode percent-decodes value (query-unescape rules: '+' becomes a
// space), returning "" together with the error on failure.
func UrlDecode(value string) (string, error) {
	decoded, err := url.QueryUnescape(value)
	if err != nil {
		return "", err
	}
	return decoded, nil
}

// UrlDecodeWithoutError decodes value like UrlDecode but swallows failures:
// on error it logs (when error logging is enabled) and returns "".
func UrlDecodeWithoutError(value string) string {
	ret, err := UrlDecode(value)
	if err == nil {
		return ret
	}
	if isErrorLogEnabled() {
		doLog(LEVEL_ERROR, "Url decode error")
	}
	return ""
}

// IsIP reports whether value is a dotted-quad IPv4 address (per ipRegex).
func IsIP(value string) bool {
	return ipRegex.MatchString(value)
}

// UrlEncode percent-encodes value. When chineseOnly is true, only Chinese
// (CJK) characters are escaped and everything else passes through verbatim;
// otherwise the whole string is query-escaped.
func UrlEncode(value string, chineseOnly bool) string {
	if !chineseOnly {
		return url.QueryEscape(value)
	}
	pieces := make([]string, 0, len(value))
	for _, r := range value {
		piece := string(r)
		if regex.MatchString(piece) {
			piece = url.QueryEscape(piece)
		}
		pieces = append(pieces, piece)
	}
	return strings.Join(pieces, "")
}

// copyHeaders returns a deep copy of m with every header name lower-cased.
// A nil input yields an empty, non-nil map.
func copyHeaders(m map[string][]string) (ret map[string][]string) {
	if m == nil {
		return make(map[string][]string)
	}
	ret = make(map[string][]string, len(m))
	for key, values := range m {
		copied := make([]string, len(values))
		copy(copied, values)
		ret[strings.ToLower(key)] = copied
	}
	return ret
}

// parseHeaders inspects the (lower-cased) Authorization header and returns
// the signature version ("v2" by default, "v4" for AWS4-HMAC-SHA256
// authorizations), plus — for v4 — the region extracted from the credential
// scope and the SignedHeaders list.
func parseHeaders(headers map[string][]string) (signature string, region string, signedHeaders string) {
	signature = "v2"
	if receviedAuthorization, ok := headers[strings.ToLower(HEADER_AUTH_CAMEL)]; ok && len(receviedAuthorization) > 0 {
		if strings.HasPrefix(receviedAuthorization[0], V4_HASH_PREFIX) {
			signature = "v4"
			// v4AuthRegex captures the Credential and SignedHeaders fields.
			matches := v4AuthRegex.FindStringSubmatch(receviedAuthorization[0])
			if len(matches) >= 3 {
				region = matches[1]
				// Narrow the full credential down to its region segment.
				regions := regionRegex.FindStringSubmatch(region)
				if len(regions) >= 2 {
					region = regions[1]
				}
				signedHeaders = matches[2]
			}

		} else if strings.HasPrefix(receviedAuthorization[0], V2_HASH_PREFIX) {
			signature = "v2"
		}
	}
	return
}

// getTemporaryKeys lists the query-parameter names whose presence marks a
// request as a presigned (temporary) URL, in lookup priority order.
func getTemporaryKeys() []string {
	return []string{
		"Signature",
		"signature",
		"X-Amz-Signature",
		"x-amz-signature",
	}
}

// getIsObs decides whether a request uses OBS-native headers/parameters.
// It returns false as soon as an x-amz-style name is found: in the query
// parameters for presigned requests, otherwise in the header names.
func getIsObs(isTemporary bool, querys []string, headers map[string][]string) bool {
	isObs := true
	if isTemporary {
		for _, value := range querys {
			keyPrefix := strings.ToLower(value)
			if strings.HasPrefix(keyPrefix, HEADER_PREFIX) {
				isObs = false
			} else if strings.HasPrefix(value, HEADER_ACCESSS_KEY_AMZ) {
				// NOTE(review): this branch checks the raw value (case-
				// sensitive) while the one above uses the lower-cased form —
				// looks intentional for the exact parameter name, but worth
				// confirming.
				isObs = false
			}
		}
	} else {
		for key := range headers {
			keyPrefix := strings.ToLower(key)
			if strings.HasPrefix(keyPrefix, HEADER_PREFIX) {
				isObs = false
				break
			}
		}
	}
	return isObs
}

// isPathStyle reports whether the request addresses the bucket path-style:
// true when a Host header is present that does NOT start with
// "<bucketName>." (i.e. not virtual-hosted style).
func isPathStyle(headers map[string][]string, bucketName string) bool {
	if receviedHost, ok := headers[HEADER_HOST]; ok && len(receviedHost) > 0 && !strings.HasPrefix(receviedHost[0], bucketName+".") {
		return true
	}
	return false
}

// GetV2Authorization v2 Authorization
func GetV2Authorization(ak, sk, method, bucketName, objectKey, queryURL string, headers map[string][]string) (ret map[string]string) {

if strings.HasPrefix(queryURL, "?") {
queryURL = queryURL[1:]
}

method = strings.ToUpper(method)

querys := strings.Split(queryURL, "&")
querysResult := make([]string, 0)
for _, value := range querys {
if value != "=" && len(value) != 0 {
querysResult = append(querysResult, value)
}
}
params := make(map[string]string)

for _, value := range querysResult {
kv := strings.Split(value, "=")
length := len(kv)
if length == 1 {
key := UrlDecodeWithoutError(kv[0])
params[key] = ""
} else if length >= 2 {
key := UrlDecodeWithoutError(kv[0])
vals := make([]string, 0, length-1)
for i := 1; i < length; i++ {
val := UrlDecodeWithoutError(kv[i])
vals = append(vals, val)
}
params[key] = strings.Join(vals, "=")
}
}
headers = copyHeaders(headers)
pathStyle := isPathStyle(headers, bucketName)
conf := &config{securityProviders: []securityProvider{NewBasicSecurityProvider(ak, sk, "")},
urlHolder: &urlHolder{scheme: "https", host: "dummy", port: 443},
pathStyle: pathStyle}
conf.signature = SignatureObs
_, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false)
ret = v2Auth(ak, sk, method, canonicalizedURL, headers, true)
v2HashPrefix := OBS_HASH_PREFIX
ret[HEADER_AUTH_CAMEL] = fmt.Sprintf("%s %s:%s", v2HashPrefix, ak, ret["Signature"])
return
}

// getQuerysResult drops empty entries and the degenerate "=" entry from a
// split query string.
func getQuerysResult(querys []string) []string {
	result := make([]string, 0, len(querys))
	for _, entry := range querys {
		if len(entry) == 0 || entry == "=" {
			continue
		}
		result = append(result, entry)
	}
	return result
}

// getParams converts filtered query entries ("key" or "key=v1[=v2...]") into
// a map. Each '='-separated segment is URL-decoded independently and the
// value segments are re-joined with literal '='; a key with no '=' maps to "".
func getParams(querysResult []string) map[string]string {
	params := make(map[string]string)
	for _, value := range querysResult {
		kv := strings.Split(value, "=")
		length := len(kv)
		if length == 1 {
			key := UrlDecodeWithoutError(kv[0])
			params[key] = ""
		} else if length >= 2 {
			key := UrlDecodeWithoutError(kv[0])
			vals := make([]string, 0, length-1)
			for i := 1; i < length; i++ {
				val := UrlDecodeWithoutError(kv[i])
				vals = append(vals, val)
			}
			params[key] = strings.Join(vals, "=")
		}
	}
	return params
}

// getTemporaryAndSignature checks params for a presigned-URL signature key.
// It returns whether the request is temporary (presigned), and which
// signature version the first matching key implies: "v2" for
// Signature/signature, "v4" for X-Amz-Signature/x-amz-signature. Without a
// match it reports (false, "v2").
func getTemporaryAndSignature(params map[string]string) (bool, string) {
	// Same key list and priority order as getTemporaryKeys, inlined here.
	for _, key := range []string{"Signature", "signature", "X-Amz-Signature", "x-amz-signature"} {
		if _, ok := params[key]; !ok {
			continue
		}
		if strings.ToLower(key) == "x-amz-signature" {
			return true, "v4"
		}
		return true, "v2"
	}
	return false, "v2"
}

// GetAuthorization computes Authorization information for a request given its
// method, bucket, key, raw query string and headers. It detects whether the
// request is presigned (temporary) and which signature version applies
// (v2/OBS or v4), then delegates to the matching signer. The returned map
// holds the computed signature fields; for header-based auth it includes the
// final Authorization header value.
//
// The inline host-prefix check previously duplicated here is replaced by the
// existing isPathStyle helper (same logic, single source of truth).
func GetAuthorization(ak, sk, method, bucketName, objectKey, queryURL string, headers map[string][]string) (ret map[string]string) {

	if strings.HasPrefix(queryURL, "?") {
		queryURL = queryURL[1:]
	}

	method = strings.ToUpper(method)

	querys := strings.Split(queryURL, "&")
	querysResult := getQuerysResult(querys)
	params := getParams(querysResult)

	isTemporary, signature := getTemporaryAndSignature(params)

	isObs := getIsObs(isTemporary, querysResult, headers)
	headers = copyHeaders(headers)
	pathStyle := isPathStyle(headers, bucketName)
	conf := &config{securityProviders: []securityProvider{NewBasicSecurityProvider(ak, sk, "")},
		urlHolder: &urlHolder{scheme: "https", host: "dummy", port: 443},
		pathStyle: pathStyle}

	if isTemporary {
		return getTemporaryAuthorization(ak, sk, method, bucketName, objectKey, signature, conf, params, headers, isObs)
	}
	// Header-based auth: version/region/signed-headers come from the
	// Authorization header itself.
	signature, region, signedHeaders := parseHeaders(headers)
	if signature == "v4" {
		conf.signature = SignatureV4
		requestURL, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false)
		parsedRequestURL, _err := url.Parse(requestURL)
		if _err != nil {
			doLog(LEVEL_WARN, "Failed to parse requestURL")
			return nil
		}
		// Restrict signing to exactly the headers listed in SignedHeaders.
		headerKeys := strings.Split(signedHeaders, ";")
		_headers := make(map[string][]string, len(headerKeys))
		for _, headerKey := range headerKeys {
			_headers[headerKey] = headers[headerKey]
		}
		ret = v4Auth(ak, sk, region, method, canonicalizedURL, parsedRequestURL.RawQuery, _headers)
		ret[HEADER_AUTH_CAMEL] = fmt.Sprintf("%s Credential=%s,SignedHeaders=%s,Signature=%s", V4_HASH_PREFIX, ret["Credential"], ret["SignedHeaders"], ret["Signature"])
	} else if signature == "v2" {
		if isObs {
			conf.signature = SignatureObs
		} else {
			conf.signature = SignatureV2
		}
		_, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false)
		ret = v2Auth(ak, sk, method, canonicalizedURL, headers, isObs)
		v2HashPrefix := V2_HASH_PREFIX
		if isObs {
			v2HashPrefix = OBS_HASH_PREFIX
		}
		ret[HEADER_AUTH_CAMEL] = fmt.Sprintf("%s %s:%s", v2HashPrefix, ak, ret["Signature"])
	}
	return

}

// getTemporaryAuthorization computes the query parameters for a presigned
// (temporary) URL. For v4 it rebuilds the AWS SigV4 query signature from the
// X-Amz-* parameters; for v2 it produces Signature/AWSAccessKeyId/Expires.
// All returned values are URL-encoded, ready to be appended to the URL.
func getTemporaryAuthorization(ak, sk, method, bucketName, objectKey, signature string, conf *config, params map[string]string,
	headers map[string][]string, isObs bool) (ret map[string]string) {

	if signature == "v4" {
		conf.signature = SignatureV4

		// Each X-Amz parameter may arrive camel-cased or lower-cased.
		longDate, ok := params[PARAM_DATE_AMZ_CAMEL]
		if !ok {
			longDate = params[HEADER_DATE_AMZ]
		}
		// NOTE(review): assumes the date parameter is present and at least 8
		// chars (YYYYMMDD prefix); a missing date would panic here.
		shortDate := longDate[:8]

		credential, ok := params[PARAM_CREDENTIAL_AMZ_CAMEL]
		if !ok {
			credential = params[strings.ToLower(PARAM_CREDENTIAL_AMZ_CAMEL)]
		}

		_credential := UrlDecodeWithoutError(credential)

		// The region is the third slash-separated field of the credential scope.
		regions := regionRegex.FindStringSubmatch(_credential)
		var region string
		if len(regions) >= 2 {
			region = regions[1]
		}

		_, scope := getCredential(ak, region, shortDate)

		expires, ok := params[PARAM_EXPIRES_AMZ_CAMEL]
		if !ok {
			expires = params[strings.ToLower(PARAM_EXPIRES_AMZ_CAMEL)]
		}

		signedHeaders, ok := params[PARAM_SIGNEDHEADERS_AMZ_CAMEL]
		if !ok {
			signedHeaders = params[strings.ToLower(PARAM_SIGNEDHEADERS_AMZ_CAMEL)]
		}

		algorithm, ok := params[PARAM_ALGORITHM_AMZ_CAMEL]
		if !ok {
			algorithm = params[strings.ToLower(PARAM_ALGORITHM_AMZ_CAMEL)]
		}

		// The incoming signature must not take part in the string-to-sign.
		if _, ok := params[PARAM_SIGNATURE_AMZ_CAMEL]; ok {
			delete(params, PARAM_SIGNATURE_AMZ_CAMEL)
		} else if _, ok := params[strings.ToLower(PARAM_SIGNATURE_AMZ_CAMEL)]; ok {
			delete(params, strings.ToLower(PARAM_SIGNATURE_AMZ_CAMEL))
		}

		ret = make(map[string]string, 6)
		ret[PARAM_ALGORITHM_AMZ_CAMEL] = algorithm
		ret[PARAM_CREDENTIAL_AMZ_CAMEL] = credential
		ret[PARAM_DATE_AMZ_CAMEL] = longDate
		ret[PARAM_EXPIRES_AMZ_CAMEL] = expires
		ret[PARAM_SIGNEDHEADERS_AMZ_CAMEL] = signedHeaders

		requestURL, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false)
		parsedRequestURL, _err := url.Parse(requestURL)
		if _err != nil {
			doLog(LEVEL_WARN, "Failed to parse requestUrl")
			return nil
		}
		stringToSign := getV4StringToSign(method, canonicalizedURL, parsedRequestURL.RawQuery, scope, longDate, UNSIGNED_PAYLOAD, strings.Split(signedHeaders, ";"), headers)
		ret[PARAM_SIGNATURE_AMZ_CAMEL] = UrlEncode(getSignature(stringToSign, sk, region, shortDate), false)
	} else if signature == "v2" {
		if isObs {
			conf.signature = SignatureObs
		} else {
			conf.signature = SignatureV2
		}
		_, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false)
		expires, ok := params["Expires"]
		if !ok {
			expires = params["expires"]
		}
		// v2 presigned URLs sign the expiry timestamp in place of the Date header.
		headers[HEADER_DATE_CAMEL] = []string{expires}
		stringToSign := getV2StringToSign(method, canonicalizedURL, headers, isObs)
		ret = make(map[string]string, 3)
		ret["Signature"] = UrlEncode(Base64Encode(HmacSha1([]byte(sk), []byte(stringToSign))), false)
		ret["AWSAccessKeyId"] = UrlEncode(ak, false)
		ret["Expires"] = UrlEncode(expires, false)
	}

	return
}

// GetContentType returns the MIME type registered for the file
// extension of key, plus true on a hit; otherwise ("", false).
func GetContentType(key string) (string, bool) {
	// If key has no '.', LastIndex yields -1 and the whole key
	// (lowercased) is used as the lookup token.
	ext := strings.ToLower(key[strings.LastIndex(key, ".")+1:])
	ct, ok := mimeTypes[ext]
	if !ok {
		return "", false
	}
	return ct, true
}

// GetReaderLen returns the number of bytes available from reader without
// consuming it. It supports the common in-memory readers, *os.File,
// *io.LimitedReader and the SDK's wrapper readers; any other reader type
// yields an error because its length cannot be known up front.
func GetReaderLen(reader io.Reader) (int64, error) {
	var contentLength int64
	var err error
	switch v := reader.(type) {
	case *bytes.Buffer:
		contentLength = int64(v.Len())
	case *bytes.Reader:
		contentLength = int64(v.Len())
	case *strings.Reader:
		contentLength = int64(v.Len())
	case *os.File:
		// File length comes from the filesystem, so Stat can fail.
		fInfo, fError := v.Stat()
		if fError != nil {
			err = fmt.Errorf("can't get reader content length, %s", fError.Error())
		} else {
			contentLength = fInfo.Size()
		}
	case *io.LimitedReader:
		contentLength = int64(v.N)
	case *fileReaderWrapper:
		contentLength = int64(v.totalCount)
	case *readerWrapper:
		contentLength = int64(v.totalCount)
	default:
		err = fmt.Errorf("can't get reader content length, unknown reader type")
	}
	return contentLength, err
}

// validateLength checks that value (a character count) lies within
// [minLen, maxLen] and returns a descriptive error when it does not,
// or when the bounds themselves are inverted.
func validateLength(value int, minLen int, maxLen int, fieldName string) error {
	switch {
	case minLen > maxLen:
		return fmt.Errorf("Min Value can not be greater than Max Value")
	case minLen == maxLen && value != minLen:
		// Exact-length requirement gets its own message.
		return fmt.Errorf("%s length must be %d characters. (value len: %d)", fieldName, maxLen, value)
	case value < minLen || value > maxLen:
		return fmt.Errorf("%s length must be between %d and %d characters. (value len: %d)", fieldName, minLen, maxLen, value)
	default:
		return nil
	}
}

+ 370
- 0
vendor/golang.org/x/net/http/httpproxy/proxy.go View File

@@ -0,0 +1,370 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package httpproxy provides support for HTTP proxy determination
// based on environment variables, as provided by net/http's
// ProxyFromEnvironment function.
//
// The API is not subject to the Go 1 compatibility promise and may change at
// any time.
package httpproxy

import (
"errors"
"fmt"
"net"
"net/url"
"os"
"strings"
"unicode/utf8"

"golang.org/x/net/idna"
)

// Config holds configuration for HTTP proxy settings. See
// FromEnvironment for details. Call ProxyFunc on a Config to obtain a
// per-request proxy-selection function.
type Config struct {
	// HTTPProxy represents the value of the HTTP_PROXY or
	// http_proxy environment variable. It will be used as the proxy
	// URL for HTTP requests unless overridden by NoProxy.
	HTTPProxy string

	// HTTPSProxy represents the HTTPS_PROXY or https_proxy
	// environment variable. It will be used as the proxy URL for
	// HTTPS requests unless overridden by NoProxy.
	HTTPSProxy string

	// NoProxy represents the NO_PROXY or no_proxy environment
	// variable. It specifies a string that contains comma-separated values
	// specifying hosts that should be excluded from proxying. Each value is
	// represented by an IP address prefix (1.2.3.4), an IP address prefix in
	// CIDR notation (1.2.3.4/8), a domain name, or a special DNS label (*).
	// An IP address prefix and domain name can also include a literal port
	// number (1.2.3.4:80).
	// A domain name matches that name and all subdomains. A domain name with
	// a leading "." matches subdomains only. For example "foo.com" matches
	// "foo.com" and "bar.foo.com"; ".y.com" matches "x.y.com" but not "y.com".
	// A single asterisk (*) indicates that no proxying should be done.
	// A best effort is made to parse the string and errors are
	// ignored.
	NoProxy string

	// CGI holds whether the current process is running
	// as a CGI handler (FromEnvironment infers this from the
	// presence of a REQUEST_METHOD environment variable).
	// When this is set, ProxyForURL will return an error
	// when HTTPProxy applies, because a client could be
	// setting HTTP_PROXY maliciously. See https://golang.org/s/cgihttpproxy.
	CGI bool
}

// config holds the parsed configuration for HTTP proxy settings.
// It is built from a Config by init(), which parses the proxy URLs
// and compiles NoProxy into matcher lists.
type config struct {
	// Config represents the original configuration as defined above.
	Config

	// httpsProxy is the parsed URL of the HTTPSProxy if defined.
	httpsProxy *url.URL

	// httpProxy is the parsed URL of the HTTPProxy if defined.
	httpProxy *url.URL

	// ipMatchers represent all values in the NoProxy that are IP address
	// prefixes or an IP address in CIDR notation.
	ipMatchers []matcher

	// domainMatchers represent all values in the NoProxy that are a domain
	// name or hostname & domain name
	domainMatchers []matcher
}

// FromEnvironment returns a Config instance populated from the
// environment variables HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or the
// lowercase versions thereof).
//
// The environment values may be either a complete URL or a
// "host[:port]", in which case the "http" scheme is assumed. An error
// is returned if the value is a different form.
func FromEnvironment() *Config {
	cfg := &Config{
		// A CGI handler is detected by the REQUEST_METHOD variable
		// the web server sets for it.
		CGI: os.Getenv("REQUEST_METHOD") != "",
	}
	cfg.HTTPProxy = getEnvAny("HTTP_PROXY", "http_proxy")
	cfg.HTTPSProxy = getEnvAny("HTTPS_PROXY", "https_proxy")
	cfg.NoProxy = getEnvAny("NO_PROXY", "no_proxy")
	return cfg
}

func getEnvAny(names ...string) string {
for _, n := range names {
if val := os.Getenv(n); val != "" {
return val
}
}
return ""
}

// ProxyFunc returns a function that determines the proxy URL to use for
// a given request URL. Changing the contents of cfg will not affect
// proxy functions created earlier.
//
// A nil URL and nil error are returned if no proxy is defined in the
// environment, or a proxy should not be used for the given request, as
// defined by NO_PROXY.
//
// As a special case, if req.URL.Host is "localhost" or a loopback address
// (with or without a port number), then a nil URL and nil error will be returned.
func (cfg *Config) ProxyFunc() func(reqURL *url.URL) (*url.URL, error) {
	// Copy and pre-parse the settings once so later mutation of cfg
	// cannot affect the returned function.
	parsed := &config{Config: *cfg}
	parsed.init()
	return parsed.proxyForURL
}

// proxyForURL selects the configured proxy for reqURL's scheme, refusing
// HTTP_PROXY in CGI environments and honoring the NO_PROXY rules.
// It returns (nil, nil) when no proxy should be used.
func (cfg *config) proxyForURL(reqURL *url.URL) (*url.URL, error) {
	var proxy *url.URL
	switch reqURL.Scheme {
	case "https":
		proxy = cfg.httpsProxy
	case "http":
		proxy = cfg.httpProxy
		if proxy != nil && cfg.CGI {
			// HTTP_PROXY can be attacker-controlled in CGI; see
			// golang.org/s/cgihttpproxy.
			return nil, errors.New("refusing to use HTTP_PROXY value in CGI environment; see golang.org/s/cgihttpproxy")
		}
	}
	if proxy == nil || !cfg.useProxy(canonicalAddr(reqURL)) {
		return nil, nil
	}
	return proxy, nil
}

// parseProxy parses a proxy setting into a URL. An empty setting yields
// (nil, nil). A value that does not parse as an http, https or socks5
// URL is retried with an "http://" prefix before being rejected.
func parseProxy(proxy string) (*url.URL, error) {
	if proxy == "" {
		return nil, nil
	}

	parsed, err := url.Parse(proxy)
	schemeOK := err == nil &&
		(parsed.Scheme == "http" || parsed.Scheme == "https" || parsed.Scheme == "socks5")
	if !schemeOK {
		// The value was bogus (or a bare host[:port]). Try again with
		// "http://" prepended; if that works, use it. Otherwise fall
		// through and complain about the original value.
		if fixed, ferr := url.Parse("http://" + proxy); ferr == nil {
			return fixed, nil
		}
	}
	if err != nil {
		return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err)
	}
	return parsed, nil
}

// useProxy reports whether requests to addr should use a proxy,
// according to the NO_PROXY or no_proxy environment variable.
// addr is always a canonicalAddr with a host and port.
func (cfg *config) useProxy(addr string) bool {
	// An empty address has nothing to match against; default to proxying.
	if len(addr) == 0 {
		return true
	}
	host, port, err := net.SplitHostPort(addr)
	if err != nil {
		// Malformed address: bypass the proxy rather than guess.
		return false
	}
	// localhost and loopback addresses never go through a proxy.
	if host == "localhost" {
		return false
	}
	ip := net.ParseIP(host)
	if ip != nil {
		if ip.IsLoopback() {
			return false
		}
	}

	// Matchers expect the normalized (trimmed, lowercase) host; addr is
	// deliberately reused to hold it from here on.
	addr = strings.ToLower(strings.TrimSpace(host))

	// IP-based rules only apply when the host parsed as an IP literal.
	if ip != nil {
		for _, m := range cfg.ipMatchers {
			if m.match(addr, port, ip) {
				return false
			}
		}
	}
	// Domain rules are checked for every host (a match means "no proxy").
	for _, m := range cfg.domainMatchers {
		if m.match(addr, port, ip) {
			return false
		}
	}
	return true
}

// init parses the Config fields into their pre-compiled forms: the proxy
// URLs and the NO_PROXY matcher lists. Parse errors are silently ignored
// (best effort, per the package documentation), leaving the corresponding
// field nil/empty.
func (c *config) init() {
	if parsed, err := parseProxy(c.HTTPProxy); err == nil {
		c.httpProxy = parsed
	}
	if parsed, err := parseProxy(c.HTTPSProxy); err == nil {
		c.httpsProxy = parsed
	}

	for _, p := range strings.Split(c.NoProxy, ",") {
		p = strings.ToLower(strings.TrimSpace(p))
		if len(p) == 0 {
			continue
		}

		// "*" disables proxying entirely; discard any earlier matchers
		// and stop parsing.
		if p == "*" {
			c.ipMatchers = []matcher{allMatch{}}
			c.domainMatchers = []matcher{allMatch{}}
			return
		}

		// IPv4/CIDR, IPv6/CIDR
		if _, pnet, err := net.ParseCIDR(p); err == nil {
			c.ipMatchers = append(c.ipMatchers, cidrMatch{cidr: pnet})
			continue
		}

		// IPv4:port, [IPv6]:port
		phost, pport, err := net.SplitHostPort(p)
		if err == nil {
			if len(phost) == 0 {
				// There is no host part, likely the entry is malformed; ignore.
				continue
			}
			// Strip the brackets from a bracketed IPv6 literal.
			if phost[0] == '[' && phost[len(phost)-1] == ']' {
				phost = phost[1 : len(phost)-1]
			}
		} else {
			// No port present; the whole entry is the host.
			phost = p
		}
		// IPv4, IPv6
		if pip := net.ParseIP(phost); pip != nil {
			c.ipMatchers = append(c.ipMatchers, ipMatch{ip: pip, port: pport})
			continue
		}

		if len(phost) == 0 {
			// There is no host part, likely the entry is malformed; ignore.
			continue
		}

		// domain.com or domain.com:80
		// foo.com matches bar.foo.com
		// .domain.com or .domain.com:port
		// *.domain.com or *.domain.com:port
		if strings.HasPrefix(phost, "*.") {
			// "*.foo.com" behaves like ".foo.com": subdomains only.
			phost = phost[1:]
		}
		matchHost := false
		if phost[0] != '.' {
			// A bare domain matches itself as well as subdomains, so
			// record that and normalize to the leading-dot form.
			matchHost = true
			phost = "." + phost
		}
		// Convert an IDN to its ASCII (punycode) form when possible.
		if v, err := idnaASCII(phost); err == nil {
			phost = v
		}
		c.domainMatchers = append(c.domainMatchers, domainMatch{host: phost, port: pport, matchHost: matchHost})
	}
}

// portMap gives the default port for each supported proxy scheme; used by
// canonicalAddr when the URL carries no explicit port.
var portMap = map[string]string{
	"http":   "80",
	"https":  "443",
	"socks5": "1080",
}

// canonicalAddr returns u's host in ASCII (punycode) form, always with a
// ":port" suffix — defaulting the port from the scheme when absent.
func canonicalAddr(u *url.URL) string {
	host := u.Hostname()
	if ascii, err := idnaASCII(host); err == nil {
		host = ascii
	}
	port := u.Port()
	if port == "" {
		port = portMap[u.Scheme]
	}
	return net.JoinHostPort(host, port)
}

// hasPort reports whether s — of the form "host", "host:port", or
// "[ipv6::address]:port" — includes a port. A colon inside IPv6 brackets
// does not count, hence the comparison against the last ']'.
func hasPort(s string) bool {
	lastColon := strings.LastIndex(s, ":")
	lastBracket := strings.LastIndex(s, "]")
	return lastColon > lastBracket
}

// idnaASCII converts a host name to its ASCII (punycode) form, passing
// already-ASCII input through unchanged.
func idnaASCII(v string) (string, error) {
	// TODO: Consider removing this check after verifying performance is okay.
	// Right now punycode verification, length checks, context checks, and the
	// permissible character tests are all omitted. It also prevents the ToASCII
	// call from salvaging an invalid IDN, when possible. As a result it may be
	// possible to have two IDNs that appear identical to the user where the
	// ASCII-only version causes an error downstream whereas the non-ASCII
	// version does not.
	// Note that for correct ASCII IDNs ToASCII will only do considerably more
	// work, but it will not cause an allocation.
	if isASCII(v) {
		return v, nil
	}
	return idna.Lookup.ToASCII(v)
}

// isASCII reports whether every byte of s is below utf8.RuneSelf,
// i.e. the string is pure 7-bit ASCII.
func isASCII(s string) bool {
	for _, c := range []byte(s) {
		if c >= utf8.RuneSelf {
			return false
		}
	}
	return true
}

// matcher represents the matching rule for a given value in the NO_PROXY list.
type matcher interface {
	// match reports whether this NO_PROXY rule applies to the given
	// normalized host, optional port, and (possibly nil) parsed IP.
	// A true result means the proxy is bypassed (see useProxy).
	match(host, port string, ip net.IP) bool
}

// allMatch matches on all possible inputs; it implements the "*" entry,
// which disables proxying entirely.
type allMatch struct{}

// match always reports true.
func (a allMatch) match(host, port string, ip net.IP) bool {
	return true
}

// cidrMatch matches IPs that fall within a NO_PROXY CIDR entry.
type cidrMatch struct {
	cidr *net.IPNet
}

// match reports whether ip lies inside the configured CIDR range;
// the host and port arguments are ignored.
func (m cidrMatch) match(host, port string, ip net.IP) bool {
	return m.cidr.Contains(ip)
}

type ipMatch struct {
ip net.IP
port string
}

func (m ipMatch) match(host, port string, ip net.IP) bool {
if m.ip.Equal(ip) {
return m.port == "" || m.port == port
}
return false
}

// domainMatch matches a NO_PROXY domain entry. host always carries a
// leading "." (normalized by init); matchHost additionally allows the
// bare domain itself, not just its subdomains.
type domainMatch struct {
	host string
	port string

	matchHost bool
}

// match reports whether host is the configured domain (when matchHost is
// set) or one of its subdomains, with an optional port restriction.
func (m domainMatch) match(host, port string, ip net.IP) bool {
	subdomain := strings.HasSuffix(host, m.host)
	// m.host[1:] strips the leading "." to compare against the bare domain.
	exact := m.matchHost && host == m.host[1:]
	if !subdomain && !exact {
		return false
	}
	return m.port == "" || m.port == port
}

+ 4
- 2
vendor/modules.txt View File

@@ -672,6 +672,9 @@ github.com/hashicorp/hcl/json/token
# github.com/huandu/xstrings v1.3.0
## explicit; go 1.12
github.com/huandu/xstrings
# github.com/huaweicloud/huaweicloud-sdk-go-obs v3.25.9+incompatible
## explicit
github.com/huaweicloud/huaweicloud-sdk-go-obs/obs
# github.com/imdario/mergo v0.3.15
## explicit; go 1.13
github.com/imdario/mergo
@@ -1146,8 +1149,6 @@ golang.org/x/crypto/ssh
golang.org/x/crypto/ssh/agent
golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
golang.org/x/crypto/ssh/knownhosts
# golang.org/x/exp v0.0.0-20231127185646-65229373498e
## explicit; go 1.20
# golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8
## explicit; go 1.12
golang.org/x/image/bmp
@@ -1167,6 +1168,7 @@ golang.org/x/net/html
golang.org/x/net/html/atom
golang.org/x/net/html/charset
golang.org/x/net/http/httpguts
golang.org/x/net/http/httpproxy
golang.org/x/net/http2
golang.org/x/net/http2/hpack
golang.org/x/net/idna


+ 1
- 1
vendor/xorm.io/xorm/dialects/postgres.go View File

@@ -1014,7 +1014,7 @@ WHERE n.nspname= s.table_schema AND c.relkind = 'r'::char AND c.relname = $1%s A

schema := db.getSchema()
if schema != "" {
s = fmt.Sprintf(s, " AND s.table_schema = $2")
s = fmt.Sprintf(s, "AND s.table_schema = $2")
args = append(args, schema)
} else {
s = fmt.Sprintf(s, "")


+ 2
- 2
web_src/vuepages/pages/guide/components/FileUpload.vue View File

@@ -380,10 +380,10 @@ export default {
console.log('getNewMultipart', err);
this.$message({
type: 'error',
message: err,
message: err.message,
});
this.uploading = false
this.uploadError(file, info);
this.uploadError(file, this.$t('modelManage.uploadFailed'));
this.updateFileStatus(file, this.$t('modelManage.uploadFailed'), 0, 2);
return err;
});


Loading…
Cancel
Save
Baidu
map