diff --git a/Gopkg.toml b/Gopkg.toml index f0d3717d..b3f675b4 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -69,6 +69,10 @@ branch = "master" name = "github.com/tencentyun/cos-go-sdk-v5" +[[constraint]] + branch = "master" + name = "github.com/NetEase-Object-Storage/nos-golang-sdk" + [prune] go-tests = true unused-packages = true diff --git a/netease.go b/netease.go new file mode 100644 index 00000000..cd5069f6 --- /dev/null +++ b/netease.go @@ -0,0 +1,209 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package storage + +import ( + "bytes" + "io/ioutil" + "os" + pathutil "path" + + "time" + + "github.com/NetEase-Object-Storage/nos-golang-sdk/config" + "github.com/NetEase-Object-Storage/nos-golang-sdk/logger" + "github.com/NetEase-Object-Storage/nos-golang-sdk/model" + "github.com/NetEase-Object-Storage/nos-golang-sdk/nosclient" +) + +// NeteaseNOSBackend is a storage backend for Netease Cloud NOS +type NeteaseNOSBackend struct { + Client nosclient.NosClient + Bucket string + Prefix string +} + +// NewNeteaseNOSBackend creates a new instance of NeteaseNOSBackend +func NewNeteaseNOSBackend(bucket string, prefix string, endpoint string) *NeteaseNOSBackend { + accessKeyId := os.Getenv("NETEASE_CLOUD_ACCESS_KEY_ID") + accessKeySecret := os.Getenv("NETEASE_CLOUD_ACCESS_KEY_SECRET") + + if len(accessKeyId) == 0 { + panic("NETEASE_CLOUD_ACCESS_KEY_ID environment variable is not set") + } + + if len(accessKeySecret) == 0 { + panic("NETEASE_CLOUD_ACCESS_KEY_SECRET environment variable is not set") + } + + if len(endpoint) == 0 { + // Set default endpoint + endpoint = "nos-eastchina1.126.net" + } + + conf := &config.Config{ + Endpoint: endpoint, + AccessKey: accessKeyId, + SecretKey: accessKeySecret, + NosServiceConnectTimeout: 3, + NosServiceReadWriteTimeout: 5, + NosServiceMaxIdleConnection: 15, + LogLevel: logger.LogLevel(logger.DEBUG), + Logger: logger.NewDefaultLogger(), + } + + client, err := nosclient.New(conf) + if err != nil { + panic("Failed to create NOS client: " + err.Error()) + } + + b := &NeteaseNOSBackend{ + Client: *client, + Bucket: bucket, + Prefix: prefix, + } + return b +} + +// ListObjects lists all objects in Netease Cloud NOS bucket, at prefix +func (b NeteaseNOSBackend) ListObjects(prefix string) ([]Object, error) { + var objects []Object + + prefix = pathutil.Join(b.Prefix, prefix) + + listRequest := &model.ListObjectsRequest{ + Bucket: b.Bucket, + Prefix: prefix, + Delimiter: "", + Marker: "", + MaxKeys: 100, + } + + for { + var lor 
*model.ListObjectsResult + lor, err := b.Client.ListObjects(listRequest) + if err != nil { + return objects, err + } + + for _, obj := range lor.Contents { + path := removePrefixFromObjectPath(prefix, obj.Key) + if objectPathIsInvalid(path) { + continue + } + + local, _ := time.LoadLocation("Local") + // LastModified time layout in NOS is 2006-01-02T15:04:05 -0700 + t, _ := time.ParseInLocation("2006-01-02T15:04:05 -0700", obj.LastModified, local) + object := Object{ + Path: path, + Content: []byte{}, + LastModified: t, + } + objects = append(objects, object) + } + if !lor.IsTruncated { + break + } + listRequest.Marker = lor.NextMarker + } + + return objects, nil +} + +// GetObject retrieves an object from Netease Cloud NOS bucket, at prefix +func (b NeteaseNOSBackend) GetObject(path string) (Object, error) { + var object Object + object.Path = path + var content []byte + key := pathutil.Join(b.Prefix, path) + + objectRequest := &model.GetObjectRequest{ + Bucket: b.Bucket, + Object: key, + } + + var nosObject *model.NOSObject + nosObject, err := b.Client.GetObject(objectRequest) + if err != nil { + return object, err + } + + body := nosObject.Body + content, err = ioutil.ReadAll(body) + defer body.Close() + if err != nil { + return object, err + } + + object.Content = content + objectMetaRequest := &model.ObjectRequest{ + Bucket: b.Bucket, + Object: key, + } + + var meta *model.ObjectMetadata + meta, err = b.Client.GetObjectMetaData(objectMetaRequest) + if err != nil { + return object, err + } + + m := meta.Metadata + // "Last-Modified" is the metadata key under which NOS returns the object's last modification time + if t, ok := m["Last-Modified"]; ok { + + local, _ := time.LoadLocation("Local") + // NOS LastModified format looks like: 2019-04-18T16:55:39 +0800 + lastModified, _ := time.ParseInLocation("2006-01-02T15:04:05 -0700", t, local) + object.LastModified = lastModified + } + + return object, nil +} + +// PutObject uploads an object to Netease Cloud NOS bucket, at prefix +func (b NeteaseNOSBackend) PutObject(path string, content []byte) error { + key := 
pathutil.Join(b.Prefix, path) + var err error + + metadata := &model.ObjectMetadata{ + Metadata: map[string]string{}, + ContentLength: int64(len(content)), + } + + putObjectRequest := &model.PutObjectRequest{ + Bucket: b.Bucket, + Object: key, + Body: bytes.NewReader(content), + Metadata: metadata, + } + _, err = b.Client.PutObjectByStream(putObjectRequest) + return err +} + +// DeleteObject removes an object from Netease Cloud NOS bucket, at prefix +func (b NeteaseNOSBackend) DeleteObject(path string) error { + key := pathutil.Join(b.Prefix, path) + + objectRequest := &model.ObjectRequest{ + Bucket: b.Bucket, + Object: key, + } + + err := b.Client.DeleteObject(objectRequest) + return err +} diff --git a/netease_test.go b/netease_test.go new file mode 100644 index 00000000..df5db4f8 --- /dev/null +++ b/netease_test.go @@ -0,0 +1,89 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package storage + +import ( + "os" + "strconv" + "testing" + + "github.com/stretchr/testify/suite" +) + +type NeteaseTestSuite struct { + suite.Suite + BrokenNeteaseNOSBackend *NeteaseNOSBackend + NoPrefixNeteaseNOSBackend *NeteaseNOSBackend +} + +const nosTestCount = 100 + +func (suite *NeteaseTestSuite) SetupSuite() { + backend := NewNeteaseNOSBackend("fake-container-cant-exist-fbce123", "", "") + suite.BrokenNeteaseNOSBackend = backend + + nosBucket := os.Getenv("TEST_STORAGE_NETEASE_BUCKET") + nosEndpoint := os.Getenv("TEST_STORAGE_NETEASE_ENDPOINT") + backend = NewNeteaseNOSBackend(nosBucket, "", nosEndpoint) + suite.NoPrefixNeteaseNOSBackend = backend + + data := []byte("some object") + path := "deleteme.txt" + + for i := 0; i < nosTestCount; i++ { + newPath := strconv.Itoa(i) + path + err := suite.NoPrefixNeteaseNOSBackend.PutObject(newPath, data) + suite.Nil(err, "no error putting deleteme.txt using Netease Cloud NOS backend") + } +} + +func (suite *NeteaseTestSuite) TearDownSuite() { + path := "deleteme.txt" + for i := 0; i < nosTestCount; i++ { + newPath := strconv.Itoa(i) + path + + err := suite.NoPrefixNeteaseNOSBackend.DeleteObject(newPath) + suite.Nil(err, "no error deleting deleteme.txt using Netease NOS backend") + } +} + +func (suite *NeteaseTestSuite) TestListObjects() { + _, err := suite.BrokenNeteaseNOSBackend.ListObjects("") + suite.NotNil(err, "cannot list objects with bad bucket") + + objs, err := suite.NoPrefixNeteaseNOSBackend.ListObjects("") + suite.Nil(err, "can list objects with good bucket, no prefix") + suite.Equal(len(objs), nosTestCount, "able to list objects") +} + +func (suite *NeteaseTestSuite) TestGetObject() { + _, err := suite.BrokenNeteaseNOSBackend.GetObject("this-file-cannot-possibly-exist.tgz") + suite.NotNil(err, "cannot get objects with bad bucket") +} + +func (suite *NeteaseTestSuite) TestPutObject() { + err := suite.BrokenNeteaseNOSBackend.PutObject("this-file-will-not-upload.txt", []byte{}) + suite.NotNil(err, 
"cannot put objects with bad bucket") +} + +func TestNeteaseStorageTestSuite(t *testing.T) { + if os.Getenv("TEST_CLOUD_STORAGE") == "1" && + os.Getenv("TEST_STORAGE_NETEASE_BUCKET") != "" && + os.Getenv("TEST_STORAGE_NETEASE_ENDPOINT") != "" { + suite.Run(t, new(NeteaseTestSuite)) + } +} diff --git a/storage_test.go b/storage_test.go index f7d9b0a6..1075dc05 100644 --- a/storage_test.go +++ b/storage_test.go @@ -58,6 +58,8 @@ func (suite *StorageTestSuite) setupStorageBackends() { bosEndpoint := os.Getenv("TEST_STORAGE_BAIDU_ENDPOINT") cosBucket := os.Getenv("TEST_STORAGE_TENCENT_BUCKET") cosEndpoint := os.Getenv("TEST_STORAGE_TENCENT_ENDPOINT") + nosBucket := os.Getenv("TEST_STORAGE_NETEASE_BUCKET") + nosEndpoint := os.Getenv("TEST_STORAGE_NETEASE_ENDPOINT") if s3Bucket != "" && s3Region != "" { suite.StorageBackends["AmazonS3"] = Backend(NewAmazonS3Backend(s3Bucket, prefix, s3Region, "", "")) } @@ -82,6 +84,9 @@ func (suite *StorageTestSuite) setupStorageBackends() { if cosBucket != "" { suite.StorageBackends["TencentCloudCOS"] = Backend(NewTencentCloudCOSBackend(cosBucket, prefix, cosEndpoint)) } + if nosBucket != "" { + suite.StorageBackends["NeteaseCloudNOS"] = Backend(NewNeteaseNOSBackend(nosBucket, prefix, nosEndpoint)) + } } } diff --git a/vendor/github.com/NetEase-Object-Storage/nos-golang-sdk/auth/nosauth.go b/vendor/github.com/NetEase-Object-Storage/nos-golang-sdk/auth/nosauth.go new file mode 100644 index 00000000..d273982a --- /dev/null +++ b/vendor/github.com/NetEase-Object-Storage/nos-golang-sdk/auth/nosauth.go @@ -0,0 +1,89 @@ +package auth + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "net/http" + "sort" + "strings" +) + +var subResources map[string]bool = map[string]bool{ + "acl": true, + "location": true, + "versioning": true, + "versions": true, + "versionId": true, + "uploadId": true, + "uploads": true, + "partNumber": true, + "delete": true, + "deduplication": true, +} + +func SignRequest(request *http.Request, publicKey 
string, secretKey string, + bucket string, encodedObject string) string { + + stringToSign := "" + stringToSign += (request.Method + "\n") + stringToSign += (request.Header.Get("Content-MD5") + "\n") + stringToSign += (request.Header.Get("Content-Type") + "\n") + stringToSign += (request.Header.Get("Date") + "\n") + + var headerKeys sort.StringSlice + for origKey, _ := range request.Header { + key := strings.ToLower(origKey) + if strings.HasPrefix(key, "x-nos-") { + headerKeys = append(headerKeys, origKey) + } + } + + headerKeys.Sort() + + for i := 0; i < headerKeys.Len(); i++ { + key := strings.ToLower(headerKeys[i]) + stringToSign += (key + ":" + request.Header.Get(headerKeys[i]) + "\n") + } + + stringToSign += (getResource(bucket, encodedObject)) + + request.ParseForm() + + var keys sort.StringSlice + for key := range request.Form { + if _, ok := subResources[key]; ok { + keys = append(keys, key) + } + } + keys.Sort() + + for i := 0; i < keys.Len(); i++ { + if i == 0 { + stringToSign += "?" 
+ } + stringToSign += keys[i] + if val := request.Form[keys[i]]; val[0] != "" { + stringToSign += ("=" + val[0]) + } + + if i < keys.Len()-1 { + stringToSign += "&" + } + } + key := []byte(secretKey) + h := hmac.New(sha256.New, key) + h.Write([]byte(stringToSign)) + return "NOS " + publicKey + ":" + base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + +func getResource(bucket string, encodedObject string) string { + resource := "/" + if bucket != "" { + resource += bucket + "/" + } + if encodedObject != "" { + resource += encodedObject + } + return resource +} diff --git a/vendor/github.com/NetEase-Object-Storage/nos-golang-sdk/config/config.go b/vendor/github.com/NetEase-Object-Storage/nos-golang-sdk/config/config.go new file mode 100644 index 00000000..bb4f7bdf --- /dev/null +++ b/vendor/github.com/NetEase-Object-Storage/nos-golang-sdk/config/config.go @@ -0,0 +1,61 @@ +package config + +import ( + "github.com/NetEase-Object-Storage/nos-golang-sdk/logger" + "github.com/NetEase-Object-Storage/nos-golang-sdk/noserror" + "github.com/NetEase-Object-Storage/nos-golang-sdk/utils" +) + +type Config struct { + Endpoint string + AccessKey string + SecretKey string + + NosServiceConnectTimeout int + NosServiceReadWriteTimeout int + NosServiceMaxIdleConnection int + + LogLevel *logger.LogLevelType + + Logger logger.Logger +} + +func (conf *Config) Check() error { + if conf.Endpoint == "" { + return utils.ProcessClientError(noserror.ERROR_CODE_CFG_ENDPOINT, "", "", "") + } + + if conf.NosServiceConnectTimeout < 0 { + return utils.ProcessClientError(noserror.ERROR_CODE_CFG_CONNECT_TIMEOUT, "", "", "") + } + + if conf.NosServiceReadWriteTimeout < 0 { + return utils.ProcessClientError(noserror.ERROR_CODE_CFG_READWRITE_TIMEOUT, "", "", "") + } + + if conf.NosServiceMaxIdleConnection < 0 { + return utils.ProcessClientError(noserror.ERROR_CODE_CFG_MAXIDLECONNECT, "", "", "") + } + + if conf.NosServiceConnectTimeout == 0 { + conf.NosServiceConnectTimeout = 30 + } + + if 
conf.NosServiceReadWriteTimeout == 0 { + conf.NosServiceReadWriteTimeout = 60 + } + + if conf.NosServiceMaxIdleConnection == 0 { + conf.NosServiceMaxIdleConnection = 60 + } + + if conf.Logger == nil { + conf.Logger = logger.NewDefaultLogger() + } + + if conf.LogLevel == nil { + conf.LogLevel = logger.LogLevel(logger.DEBUG) + } + + return nil +} diff --git a/vendor/github.com/NetEase-Object-Storage/nos-golang-sdk/httpclient/httpclient.go b/vendor/github.com/NetEase-Object-Storage/nos-golang-sdk/httpclient/httpclient.go new file mode 100644 index 00000000..89e018bf --- /dev/null +++ b/vendor/github.com/NetEase-Object-Storage/nos-golang-sdk/httpclient/httpclient.go @@ -0,0 +1,237 @@ +/* +Provides an HTTP Transport that implements the `RoundTripper` interface and +can be used as a built in replacement for the standard library's, providing: + + * connection timeouts + * request timeouts + +This is a thin wrapper around `http.Transport` that sets dial timeouts and uses +Go's internal timer scheduler to call the Go 1.1+ `CancelRequest()` API. +*/ +package httpclient + +import ( + "crypto/tls" + "errors" + "io" + "net" + "net/http" + "net/url" + "sync" + "time" +) + +// returns the current version of the package +func Version() string { + return "0.4.1" +} + +// Transport implements the RoundTripper interface and can be used as a replacement +// for Go's built in http.Transport implementing end-to-end request timeouts. +// +// transport := &httpclient.Transport{ +// ConnectTimeout: 1*time.Second, +// ResponseHeaderTimeout: 5*time.Second, +// RequestTimeout: 10*time.Second, +// } +// defer transport.Close() +// +// client := &http.Client{Transport: transport} +// req, _ := http.NewRequest("GET", "http://127.0.0.1/test", nil) +// resp, err := client.Do(req) +// if err != nil { +// return err +// } +// defer resp.Body.Close() +// +type Transport struct { + // Proxy specifies a function to return a proxy for a given + // *http.Request. 
If the function returns a non-nil error, the + // request is aborted with the provided error. + // If Proxy is nil or returns a nil *url.URL, no proxy is used. + Proxy func(*http.Request) (*url.URL, error) + + // Dial specifies the dial function for creating TCP + // connections. This will override the Transport's ConnectTimeout and + // ReadWriteTimeout settings. + // If Dial is nil, a dialer is generated on demand matching the Transport's + // options. + Dial func(network, addr string) (net.Conn, error) + + // TLSClientConfig specifies the TLS configuration to use with + // tls.Client. If nil, the default configuration is used. + TLSClientConfig *tls.Config + + // DisableKeepAlives, if true, prevents re-use of TCP connections + // between different HTTP requests. + DisableKeepAlives bool + + // DisableCompression, if true, prevents the Transport from + // requesting compression with an "Accept-Encoding: gzip" + // request header when the Request contains no existing + // Accept-Encoding value. If the Transport requests gzip on + // its own and gets a gzipped response, it's transparently + // decoded in the Response.Body. However, if the user + // explicitly requested gzip it is not automatically + // uncompressed. + DisableCompression bool + + // MaxIdleConnsPerHost, if non-zero, controls the maximum idle + // (keep-alive) to keep per-host. If zero, + // http.DefaultMaxIdleConnsPerHost is used. + MaxIdleConnsPerHost int + + // ConnectTimeout, if non-zero, is the maximum amount of time a dial will wait for + // a connect to complete. + ConnectTimeout time.Duration + + // ResponseHeaderTimeout, if non-zero, specifies the amount of + // time to wait for a server's response headers after fully + // writing the request (including its body, if any). This + // time does not include the time to read the response body. 
+ ResponseHeaderTimeout time.Duration + + // RequestTimeout, if non-zero, specifies the amount of time for the entire + // request to complete (including all of the above timeouts + entire response body). + // This should never be less than the sum total of the above two timeouts. + RequestTimeout time.Duration + + // ReadWriteTimeout, if non-zero, will set a deadline for every Read and + // Write operation on the request connection. + ReadWriteTimeout time.Duration + + // TCPWriteBufferSize, the size of the operating system's write + // buffer associated with the connection. + TCPWriteBufferSize int + + // TCPReadBuffserSize, the size of the operating system's read + // buffer associated with the connection. + TCPReadBufferSize int + + starter sync.Once + transport *http.Transport +} + +// Close cleans up the Transport, currently a no-op +func (t *Transport) Close() error { + return nil +} + +func (t *Transport) lazyStart() { + if t.Dial == nil { + t.Dial = func(netw, addr string) (net.Conn, error) { + c, err := net.DialTimeout(netw, addr, t.ConnectTimeout) + if err != nil { + return nil, err + } + + if t.TCPReadBufferSize != 0 || t.TCPWriteBufferSize != 0 { + if tcpCon, ok := c.(*net.TCPConn); ok { + if t.TCPWriteBufferSize != 0 { + if err = tcpCon.SetWriteBuffer(t.TCPWriteBufferSize); err != nil { + return nil, err + } + } + if t.TCPReadBufferSize != 0 { + if err = tcpCon.SetReadBuffer(t.TCPReadBufferSize); err != nil { + return nil, err + } + } + } else { + err = errors.New("Not Tcp Connection") + return nil, err + } + } + + if t.ReadWriteTimeout > 0 { + timeoutConn := &rwTimeoutConn{ + TCPConn: c.(*net.TCPConn), + rwTimeout: t.ReadWriteTimeout, + } + return timeoutConn, nil + } + return c, nil + } + } + + t.transport = &http.Transport{ + Dial: t.Dial, + Proxy: t.Proxy, + TLSClientConfig: t.TLSClientConfig, + DisableKeepAlives: t.DisableKeepAlives, + DisableCompression: t.DisableCompression, + MaxIdleConnsPerHost: t.MaxIdleConnsPerHost, + ResponseHeaderTimeout: 
t.ResponseHeaderTimeout, + } +} + +func (t *Transport) CancelRequest(req *http.Request) { + t.starter.Do(t.lazyStart) + + t.transport.CancelRequest(req) +} + +func (t *Transport) CloseIdleConnections() { + t.starter.Do(t.lazyStart) + + t.transport.CloseIdleConnections() +} + +func (t *Transport) RegisterProtocol(scheme string, rt http.RoundTripper) { + t.starter.Do(t.lazyStart) + + t.transport.RegisterProtocol(scheme, rt) +} + +func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) { + t.starter.Do(t.lazyStart) + + if t.RequestTimeout > 0 { + timer := time.AfterFunc(t.RequestTimeout, func() { + t.transport.CancelRequest(req) + }) + + resp, err = t.transport.RoundTrip(req) + if err != nil { + timer.Stop() + } else { + resp.Body = &bodyCloseInterceptor{ReadCloser: resp.Body, timer: timer} + } + } else { + resp, err = t.transport.RoundTrip(req) + } + + return +} + +type bodyCloseInterceptor struct { + io.ReadCloser + timer *time.Timer +} + +func (bci *bodyCloseInterceptor) Close() error { + bci.timer.Stop() + return bci.ReadCloser.Close() +} + +// A net.Conn that sets a deadline for every Read or Write operation +type rwTimeoutConn struct { + *net.TCPConn + rwTimeout time.Duration +} + +func (c *rwTimeoutConn) Read(b []byte) (int, error) { + err := c.TCPConn.SetDeadline(time.Now().Add(c.rwTimeout)) + if err != nil { + return 0, err + } + return c.TCPConn.Read(b) +} + +func (c *rwTimeoutConn) Write(b []byte) (int, error) { + err := c.TCPConn.SetDeadline(time.Now().Add(c.rwTimeout)) + if err != nil { + return 0, err + } + return c.TCPConn.Write(b) +} diff --git a/vendor/github.com/NetEase-Object-Storage/nos-golang-sdk/logger/logger.go b/vendor/github.com/NetEase-Object-Storage/nos-golang-sdk/logger/logger.go new file mode 100644 index 00000000..b34432be --- /dev/null +++ b/vendor/github.com/NetEase-Object-Storage/nos-golang-sdk/logger/logger.go @@ -0,0 +1,247 @@ +package logger + +import ( + "log" + "os" +) + + +// These are the integer 
logging levels used by the logger +type Level int + +const ( + LOGOFF LogLevelType = iota + FINE + DEBUG + TRACE + INFO + WARNING + ERROR + CRITICAL +) + + +const ( + // LogOff states that no logging should be performed by the SDK. This is the + // default state of the SDK, and should be use to disable all logging. + LogOff LogLevelType = iota * 0x1000 + + // LogDebug state that debug output should be logged by the SDK. This should + // be used to inspect request made and responses received. + LogDebug +) + +// Debug Logging Sub Levels +const ( + // LogDebugWithSigning states that the SDK should log request signing and + // presigning events. This should be used to log the signing details of + // requests for debugging. Will also enable LogDebug. + LogDebugWithSigning LogLevelType = LogDebug | (1 << iota) + + // LogDebugWithHTTPBody states the SDK should log HTTP request and response + // HTTP bodys in addition to the headers and path. This should be used to + // see the body content of requests and responses made while using the SDK + // Will also enable LogDebug. + LogDebugWithHTTPBody + + // LogDebugWithRequestRetries states the SDK should log when service requests will + // be retried. This should be used to log when you want to log when service + // requests are being retried. Will also enable LogDebug. + LogDebugWithRequestRetries + + // LogDebugWithRequestErrors states the SDK should log when service requests fail + // to build, send, validate, or unmarshal. + LogDebugWithRequestErrors +) + +// Logging level strings +var ( + levelStrings = [...]string{"FNST", "FINE", "DEBG", "TRAC", "INFO", "WARN", "EROR", "CRIT"} +) + +func (l Level) String() string { + if l < 0 || int(l) > len(levelStrings) { + return "UNKNOWN" + } + return levelStrings[int(l)] +} + +/****** Variables ******/ +var ( + // LogBufferLength specifies how many log messages a particular log4go + // logger can buffer at a time before writing them. 
+ LogBufferLength = 32 +) + + +// A LogLevelType defines the level logging should be performed at. Used to instruct +// the SDK which statements should be logged. +type LogLevelType uint + +// LogLevel returns the pointer to a LogLevel. Should be used to workaround +// not being able to take the address of a non-composite literal. +func LogLevel(l LogLevelType) *LogLevelType { + return &l +} + +// Value returns the LogLevel value or the default value LogOff if the LogLevel +// is nil. Safe to use on nil value LogLevelTypes. +func (l *LogLevelType) Value() LogLevelType { + if l != nil { + return *l + } + return LogOff +} + +// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be +// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If +// LogLevel is nill, will default to LogOff comparison. +func (l *LogLevelType) Matches(v LogLevelType) bool { + c := l.Value() + return c&v == v +} + +// AtLeast returns true if this LogLevel is at least high enough to satisfies v. +// Is safe to use on nil value LogLevelTypes. If LogLevel is nill, will default +// to LogOff comparison. +func (l *LogLevelType) AtLeast(v LogLevelType) bool { + c := l.Value() + return c >= v +} + +// to LogOff comparison. +func (l *LogLevelType) logOff(v LogLevelType) bool { + c := l.Value() + + if c == LOGOFF { + return true + } + + return c > v +} + + +// A Logger is a minimalistic interface for the SDK to log messages to. Should +// be used to provide custom logging writers for the SDK to use. +type Logger interface { + Log(...interface{}) +} + +// A LoggerFunc is a convenience type to convert a function taking a variadic +// list of arguments and wrap it so the Logger interface can be used. +// +// Example: +// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) { +// fmt.Fprintln(os.Stdout, args...) 
+// })}) +type LoggerFunc func(...interface{}) + +// Log calls the wrapped function with the arguments provided +func (f LoggerFunc) Log(args ...interface{}) { + f(args...) +} + +// NewDefaultLogger returns a Logger which will write log messages to stdout, and +// use same formatting runes as the stdlib log.Logger +func NewDefaultLogger() Logger { + return &defaultLogger{ + logger: log.New(os.Stdout, "", log.LstdFlags), + } +} + +// A defaultLogger provides a minimalistic logger satisfying the Logger interface. +type defaultLogger struct { + logger *log.Logger +} + +// Log logs the parameters to the stdlib logger. See log.Println. +func (l defaultLogger) Log(args ...interface{}) { + l.logger.Println(args...) +} + + +type NosLog struct { + LogLevel *LogLevelType + // The logger writer interface to write logging messages to. Defaults to + // standard out. + Logger Logger +} + + +func (nosLog NosLog) Debug(args ...interface{}) { + if nosLog.LogLevel.logOff(DEBUG) { + return + } + + if nosLog.Logger == nil { + return + } + + nosLog.Logger.Log(args...) +} + + +func (nosLog NosLog) Trace(args ...interface{}) { + if nosLog.LogLevel.logOff(TRACE) { + return + } + + if nosLog.Logger == nil { + return + } + + nosLog.Logger.Log(args...) +} + + +func (nosLog NosLog) Info(args ...interface{}) { + if nosLog.LogLevel.logOff(INFO) { + return + } + + if nosLog.Logger == nil { + return + } + + nosLog.Logger.Log(args...) +} + + +func (nosLog NosLog) Warn(args ...interface{}) { + if nosLog.LogLevel.logOff(WARNING) { + return + } + + if nosLog.Logger == nil { + return + } + + nosLog.Logger.Log(args...) +} + + +func (nosLog NosLog) Error(args ...interface{}) { + if nosLog.LogLevel.logOff(ERROR) { + return + } + + if nosLog.Logger == nil { + return + } + + nosLog.Logger.Log(args...) +} + + +func (nosLog NosLog) Critical(args ...interface{}) { + if nosLog.LogLevel.logOff(CRITICAL) { + return + } + + if nosLog.Logger == nil { + return + } + + nosLog.Logger.Log(args...) 
+} + diff --git a/vendor/github.com/NetEase-Object-Storage/nos-golang-sdk/model/nosrequest.go b/vendor/github.com/NetEase-Object-Storage/nos-golang-sdk/model/nosrequest.go new file mode 100644 index 00000000..85e3a28b --- /dev/null +++ b/vendor/github.com/NetEase-Object-Storage/nos-golang-sdk/model/nosrequest.go @@ -0,0 +1,144 @@ +package model + +import ( + "encoding/xml" + "io" +) + +// Create Bucket + +type CreateBucketRequest struct { + XMLName xml.Name `xml:"CreateBucketConfiguration"` + Location string `xml:"LocationConstraint"` +} + + + +// CompleteMultiUpload +type UploadPart struct { + XMLName xml.Name `xml:"Part"` + PartNumber int `xml:"PartNumber"` + Etag string `xml:"ETag"` +} + +type UploadParts struct { + XMLName xml.Name `xml:"CompleteMultipartUpload"` + Parts []UploadPart `xml:"Part"` +} + +func (uploadParts *UploadParts) Append(part UploadPart) { + uploadParts.Parts = append(uploadParts.Parts, part) +} + +// DeleteMultiObjects +type DeleteObject struct { + XMLName xml.Name `xml:"Object"` + Key string `xml:"Key"` +} + +type DeleteMultiObjects struct { + XMLName xml.Name `xml:"Delete"` + Quiet bool `xml:"Quiet"` + Objects []DeleteObject `xml:"Object"` +} + +func (deleteMulti *DeleteMultiObjects) Append(object DeleteObject) { + deleteMulti.Objects = append(deleteMulti.Objects, object) +} + +type ObjectMetadata struct { + ContentLength int64 + Metadata map[string]string +} + +type PutObjectRequest struct { + Bucket string + Object string + Body io.ReadSeeker + FilePath string + Metadata *ObjectMetadata +} + +type CopyObjectRequest struct { + SrcBucket string + SrcObject string + DestBucket string + DestObject string +} + +type MoveObjectRequest struct { + SrcBucket string + SrcObject string + DestBucket string + DestObject string +} + +type DeleteMultiObjectsRequest struct { + Bucket string + DelectObjects *DeleteMultiObjects +} + +type GetObjectRequest struct { + Bucket string + Object string + ObjRange string + IfModifiedSince string +} + +type 
ObjectRequest struct { + Bucket string + Object string +} + +type ListObjectsRequest struct { + Bucket string + Prefix string + Delimiter string + Marker string + MaxKeys int +} + +type InitMultiUploadRequest struct { + Bucket string + Object string + Metadata *ObjectMetadata +} + +type UploadPartRequest struct { + Bucket string + Object string + UploadId string + PartNumber int + Content []byte + PartSize int64 + ContentMd5 string +} + +type CompleteMultiUploadRequest struct { + Bucket string + Object string + UploadId string + Parts []UploadPart + ContentMd5 string + ObjectMd5 string +} + +type AbortMultiUploadRequest struct { + Bucket string + Object string + UploadId string +} + +type ListUploadPartsRequest struct { + Bucket string + Object string + UploadId string + MaxParts int + PartNumberMarker int +} + +type ListMultiUploadsRequest struct { + Bucket string + KeyMarker string + MaxUploads int +} diff --git a/vendor/github.com/NetEase-Object-Storage/nos-golang-sdk/model/nosresponse.go b/vendor/github.com/NetEase-Object-Storage/nos-golang-sdk/model/nosresponse.go new file mode 100644 index 00000000..dbcb17c3 --- /dev/null +++ b/vendor/github.com/NetEase-Object-Storage/nos-golang-sdk/model/nosresponse.go @@ -0,0 +1,120 @@ +package model + +import ( + "encoding/xml" + "io" +) + +type ObjectResult struct { + Etag string + RequestId string +} + +type NOSObject struct { + Key string + BucketName string + ObjectMetadata *ObjectMetadata + Body io.ReadCloser `type:"blob"` +} + +type DeleteError struct { + XMLName xml.Name `xml:"Error"` + Key string `xml:"Key"` + Code string `xml:"Code"` + Message string `xml:"Message"` +} + +type DeleteKey struct { + XMLName xml.Name `xml:"Deleted"` + Key string `xml:"Key"` +} + +type DeleteObjectsResult struct { + XMLName xml.Name `xml:"DeleteResult"` + Deleted []DeleteKey `xml:"Deleted"` + Error []DeleteError `xml:"Error"` +} + +type ListObjectsResult struct { + XMLName xml.Name `xml:"ListBucketResult"` + Bucket string `xml:"Name"` 
+ Prefix string `xml:"Prefix"` + CommonPrefixes []CommonPrefix `xml:"CommonPrefixes"` + MaxKeys string `xml:"MaxKeys"` + NextMarker string `xml:"NextMarker"` + IsTruncated bool `xml:"IsTruncated"` + Contents []Contents `xml:"Contents"` +} + +type InitMultiUploadResult struct { + XMLName xml.Name `xml:"InitiateMultipartUploadResult"` + Bucket string `xml:"Bucket"` + Object string `xml:"Key"` + UploadId string `xml:"UploadId"` +} + +type CompleteMultiUploadResult struct { + XMLName xml.Name `xml:"CompleteMultipartUploadResult"` + Location string `xml:"Location"` + Bucket string `xml:"Bucket"` + Key string `xml:"Key"` + Etag string `xml:"ETag"` +} + +type Owner struct { + XMLName xml.Name `xml:"Owner"` + Id string `xml:"ID"` + DisplayName string `xml:"DisplayName"` +} + +type UploadPartRet struct { + XMLName xml.Name `xml:"Part"` + PartNumber int `xml:"PartNumber"` + LastModified string `xml:"LastModified"` + Etag string `xml:"ETag"` + Size int `xml:"Size"` +} + +type ListPartsResult struct { + XMLName xml.Name `xml:"ListPartsResult"` + Bucket string `xml:"Bucket"` + Key string `xml:"Key"` + UploadId string `xml:"UploadId"` + Owner Owner `xml:"Owner"` + StorageClass string `xml:"StorageClass"` + PartNumberMarker int `xml:"PartNumberMarker"` + NextPartNumberMarker int `xml:"NextPartNumberMarker"` + MaxPart int `xml:"MaxParts"` + IsTruncated bool `xml:"IsTruncated"` + Parts []UploadPartRet `xml:"Part"` +} + +type Contents struct { + XMLName xml.Name `xml:"Contents"` + Key string `xml:"Key"` + LastModified string `xml:"LastModified"` + Etag string `xml:"Etag"` + Size int64 `xml:"Size"` +} + +type CommonPrefix struct { + XMLName xml.Name `xml:"CommonPrefixes"` + Prefix string `xml:"Prefix"` +} + +type MultipartUpload struct { + XMLName xml.Name `xml:"Upload"` + Key string `xml:"Key"` + UploadId string `xml:"UploadId"` + StorageClass string `xml:"StorageClass"` + Owner Owner `xml:"Owner"` + Initiated string `xml:"Initiated"` +} + +type ListMultiUploadsResult struct { + 
XMLName xml.Name `xml:"ListMultipartUploadsResult"` + Bucket string `xml:"Bucket"` + NextKeyMarker string `xml:"NextKeyMarker"` + IsTruncated bool `xml:"IsTruncated"` + Uploads []MultipartUpload `xml:"Upload"` +} diff --git a/vendor/github.com/NetEase-Object-Storage/nos-golang-sdk/nosclient/nosclient.go b/vendor/github.com/NetEase-Object-Storage/nos-golang-sdk/nosclient/nosclient.go new file mode 100644 index 00000000..1d7c029f --- /dev/null +++ b/vendor/github.com/NetEase-Object-Storage/nos-golang-sdk/nosclient/nosclient.go @@ -0,0 +1,914 @@ +package nosclient + +import ( + "bytes" + "crypto/md5" + "encoding/hex" + "encoding/xml" + "errors" + "github.com/NetEase-Object-Storage/nos-golang-sdk/auth" + "github.com/NetEase-Object-Storage/nos-golang-sdk/config" + "github.com/NetEase-Object-Storage/nos-golang-sdk/httpclient" + "github.com/NetEase-Object-Storage/nos-golang-sdk/logger" + "github.com/NetEase-Object-Storage/nos-golang-sdk/model" + "github.com/NetEase-Object-Storage/nos-golang-sdk/nosconst" + "github.com/NetEase-Object-Storage/nos-golang-sdk/noserror" + "github.com/NetEase-Object-Storage/nos-golang-sdk/utils" + "io" + "net/http" + "net/url" + "os" + "strconv" + "time" +) + +type NosClient struct { + endPoint string + accessKey string + secretKey string + + httpClient *http.Client + Log logger.NosLog +} + +func NewHttpClient(connectTimeout, requestTimeout, readWriteTimeout, + maxIdleConnection int) *http.Client { + + tr := &httpclient.Transport{ + ConnectTimeout: time.Duration(connectTimeout) * time.Second, + RequestTimeout: time.Duration(requestTimeout) * time.Second, + ReadWriteTimeout: time.Duration(readWriteTimeout) * time.Second, + DisableKeepAlives: false, + MaxIdleConnsPerHost: maxIdleConnection, + } + + return &http.Client{Transport: tr} +} + +// New constructs a new Driver with the given NOS credentials, bucket, chunksize flag +func New(conf *config.Config) (*NosClient, error) { + noserror.Init() + + err := conf.Check() + if err != nil { + return 
nil, err + } + + client := &NosClient{ + endPoint: conf.Endpoint, + accessKey: conf.AccessKey, + secretKey: conf.SecretKey, + + httpClient: NewHttpClient( + conf.NosServiceConnectTimeout, + conf.NosServiceReadWriteTimeout, + conf.NosServiceReadWriteTimeout, + conf.NosServiceMaxIdleConnection), + + Log: logger.NosLog{ + LogLevel: conf.LogLevel, + Logger: conf.Logger, + }, + } + + return client, nil +} + +func (client *NosClient) getNosRequest(method, bucket, object string, metadata *model.ObjectMetadata, + body io.Reader, params map[string]string, bodyStyle string) (*http.Request, error) { + + var opaque string + urlStr := "http://" + bucket + "." + client.endPoint + "/" + + encodedObject := utils.NosUrlEncode(object) + urlStr += encodedObject + opaque = urlStr + + v := url.Values{} + for key, val := range params { + v.Add(key, val) + } + + if len(v) > 0 { + urlStr += "?" + v.Encode() + } + + request, err := http.NewRequest(method, urlStr, body) + if err != nil { + return nil, err + } + request.URL.Opaque = opaque + //add http header + //request.Header.Set(nosconst.DATE, (time.Now().Format(nosconst.RFC1123_GMT))) + request.Header.Set(nosconst.DATE, (time.Now().UTC().Format(nosconst.RFC1123_GMT))) + request.Header.Set(nosconst.NOS_ENTITY_TYPE, bodyStyle) + request.Header.Set(nosconst.USER_AGENT, utils.InitUserAgent()) + + if metadata != nil { + if metadata.Metadata != nil { + for key, value := range metadata.Metadata { + if value != "" { + request.Header.Set(key, value) + } + } + } + } + + if client.accessKey != "" && client.secretKey != "" { + request.Header.Set(nosconst.AUTHORIZATION, + auth.SignRequest(request, client.accessKey, client.secretKey, bucket, encodedObject)) + } + + return request, nil +} + +func (client *NosClient) CreateBucket(bucketName string, location nosconst.Location, + acl nosconst.Acl) error { + var locationConstraint string + switch location { + case nosconst.HZ: + locationConstraint = "HZ" + default: + return errors.New("unsupported 
Location") + } + + var aclString string + + switch acl { + case nosconst.PUBLICREAD: + aclString = "public-read" + case nosconst.PRIVATE: + aclString = "private" + } + + request := &model.CreateBucketRequest{ + Location: locationConstraint, + } + body, err := xml.Marshal(request) + if err != nil { + return err + } + + //Metadata + metadata := &model.ObjectMetadata{ + Metadata: map[string]string{ + nosconst.X_NOS_ACL: aclString, + }, + } + + req, err := client.getNosRequest("PUT", bucketName, "", + metadata, bytes.NewReader(body), nil, nosconst.XML_TYPE) + + resp, err := client.httpClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + client.Log.Debug("resp.StatusCode=", resp.StatusCode) + + if resp.StatusCode == http.StatusOK { + return nil + } else { + err := utils.ProcessServerError(resp, bucketName, "") + return err + } +} + +func (client *NosClient) PutObjectByStream(putObjectRequest *model.PutObjectRequest) (*model.ObjectResult, error) { + if putObjectRequest == nil { + return nil, utils.ProcessClientError(noserror.ERROR_CODE_REQUEST_ERROR, "", "", "") + } + + var contentLength int64 + if putObjectRequest.Metadata != nil { + contentLength = putObjectRequest.Metadata.ContentLength + } + + err := utils.VerifyParamsWithLength(putObjectRequest.Bucket, putObjectRequest.Object, contentLength) + if err != nil { + return nil, err + } + + request, err := client.getNosRequest("PUT", putObjectRequest.Bucket, putObjectRequest.Object, + putObjectRequest.Metadata, putObjectRequest.Body, nil, nosconst.JSON_TYPE) + if err != nil { + return nil, err + } + + resp, err := client.httpClient.Do(request) + if err != nil { + return nil, err + } + defer resp.Body.Close() + client.Log.Debug("resp.StatusCode = ", resp.StatusCode) + if resp.StatusCode == http.StatusOK { + requestid, etag := utils.PopulateResponseHeader(resp) + objectResult := &model.ObjectResult{ + Etag: etag, + RequestId: requestid, + } + + return objectResult, nil + } else { + err := 
utils.ProcessServerError(resp, putObjectRequest.Bucket, putObjectRequest.Object) + return nil, err + } +} + +func (client *NosClient) PutObjectByFile(putObjectRequest *model.PutObjectRequest) (*model.ObjectResult, error) { + if putObjectRequest == nil { + return nil, utils.ProcessClientError(noserror.ERROR_CODE_REQUEST_ERROR, "", "", "") + } + + file, err := os.Open(putObjectRequest.FilePath) + if err != nil { + return nil, utils.ProcessClientError(noserror.ERROR_CODE_FILE_INVALID, "", "", err.Error()) + } + defer file.Close() + + if putObjectRequest.Metadata == nil { + putObjectRequest.Metadata = &model.ObjectMetadata{} + } + + if putObjectRequest.Metadata.ContentLength == 0 { + fi, err := file.Stat() + if err == nil { + putObjectRequest.Metadata.ContentLength = fi.Size() + } else { + return nil, utils.ProcessClientError(noserror.ERROR_CODE_FILE_INVALID, "", "", err.Error()) + } + } + + putObjectRequest.Body = file + + return client.PutObjectByStream(putObjectRequest) +} + +func (client *NosClient) CopyObject(copyObjectRequest *model.CopyObjectRequest) error { + + if copyObjectRequest == nil { + return utils.ProcessClientError(noserror.ERROR_CODE_REQUEST_ERROR, "", "", "") + } + + srcBucket := copyObjectRequest.SrcBucket + srcObject := copyObjectRequest.SrcObject + destBucket := copyObjectRequest.DestBucket + destObject := copyObjectRequest.DestObject + + err := utils.VerifyParamsWithObject(destBucket, destObject) + if err != nil { + return err + } + + err = utils.VerifyParamsWithObject(srcBucket, srcObject) + if err != nil { + return utils.ProcessClientError(noserror.ERROR_CODE_SRCBUCKETANDOBJECT_ERROR, destBucket, destObject, "") + } + + copySource := "/" + utils.NosUrlEncode(srcBucket) + "/" + utils.NosUrlEncode(srcObject) + metadata := &model.ObjectMetadata{ + Metadata: map[string]string{ + nosconst.X_NOS_COPY_SOURCE: copySource, + }, + } + + request, err := client.getNosRequest("PUT", destBucket, destObject, metadata, nil, nil, nosconst.JSON_TYPE) + if err != 
nil { + return err + } + + resp, err := client.httpClient.Do(request) + if err != nil { + return err + } + defer resp.Body.Close() + client.Log.Debug("resp.StatusCode=", resp.StatusCode) + + if resp.StatusCode == http.StatusOK { + return nil + } else { + err := utils.ProcessServerError(resp, destBucket, destObject) + return err + } +} + +func (client *NosClient) MoveObject(moveObjectRequest *model.MoveObjectRequest) error { + + if moveObjectRequest == nil { + return utils.ProcessClientError(noserror.ERROR_CODE_REQUEST_ERROR, "", "", "") + } + + srcBucket := moveObjectRequest.SrcBucket + srcObject := moveObjectRequest.SrcObject + destBucket := moveObjectRequest.DestBucket + destObject := moveObjectRequest.DestObject + + err := utils.VerifyParamsWithObject(destBucket, destObject) + if err != nil { + return err + } + + err = utils.VerifyParamsWithObject(srcBucket, srcObject) + if err != nil { + return utils.ProcessClientError(noserror.ERROR_CODE_SRCBUCKETANDOBJECT_ERROR, destBucket, destObject, "") + } + + moveSource := "/" + srcBucket + "/" + srcObject + metadata := &model.ObjectMetadata{ + Metadata: map[string]string{ + nosconst.X_NOS_MOVE_SOURCE: utils.NosUrlEncode(moveSource), + }, + } + + request, err := client.getNosRequest("PUT", destBucket, destObject, metadata, nil, nil, nosconst.JSON_TYPE) + if err != nil { + return err + } + + resp, err := client.httpClient.Do(request) + if err != nil { + return err + } + defer resp.Body.Close() + client.Log.Debug("resp.StatusCode=", resp.StatusCode) + + if resp.StatusCode == http.StatusOK { + return nil + } else { + err := utils.ProcessServerError(resp, destBucket, destObject) + return err + } +} + +func (client *NosClient) DeleteObject(deleteObjectRequest *model.ObjectRequest) error { + + if deleteObjectRequest == nil { + return utils.ProcessClientError(noserror.ERROR_CODE_REQUEST_ERROR, "", "", "") + } + + err := utils.VerifyParamsWithObject(deleteObjectRequest.Bucket, deleteObjectRequest.Object) + if err != nil { + 
return err + } + + request, err := client.getNosRequest("DELETE", deleteObjectRequest.Bucket, deleteObjectRequest.Object, + nil, nil, nil, nosconst.JSON_TYPE) + if err != nil { + return err + } + + resp, err := client.httpClient.Do(request) + if err != nil { + return err + } + defer resp.Body.Close() + client.Log.Debug("resp.StatusCode=", resp.StatusCode) + + if resp.StatusCode == http.StatusOK { + return nil + } else { + err := utils.ProcessServerError(resp, deleteObjectRequest.Bucket, deleteObjectRequest.Object) + return err + } +} + +func (client *NosClient) DeleteMultiObjects(deleteRequest *model.DeleteMultiObjectsRequest) (*model.DeleteObjectsResult, + error) { + + if deleteRequest == nil { + return nil, utils.ProcessClientError(noserror.ERROR_CODE_REQUEST_ERROR, "", "", "") + } + + err := utils.VerifyParams(deleteRequest.Bucket) + if err != nil { + return nil, err + } + delectObjects := deleteRequest.DelectObjects + if delectObjects == nil { + return nil, utils.ProcessClientError(noserror.ERROR_CODE_DELETEMULTIOBJECTS_ERROR, "", "", "") + } + if len(delectObjects.Objects) > nosconst.MAX_FILENUMBER { + return nil, utils.ProcessClientError(noserror.ERROR_CODE_OBJECTSBIGGER_ERROR, "", "", "") + } + + body, err := xml.Marshal(delectObjects) + if err != nil { + return nil, err + } + + contentLength := int64(len(body)) + if contentLength > nosconst.MAX_DELETEBODY { + return nil, utils.ProcessClientError(noserror.ERROR_CODE_OBJECTSBIGGER_ERROR, "", "", "") + } + + md5Ctx := md5.New() + md5Ctx.Write(body) + cipherStr := md5Ctx.Sum(nil) + metadata := &model.ObjectMetadata{ + ContentLength: contentLength, + Metadata: map[string]string{ + "Content-MD5": hex.EncodeToString(cipherStr), + }, + } + params := map[string]string{ + "delete": "", + } + request, err := client.getNosRequest("POST", deleteRequest.Bucket, "", metadata, + bytes.NewReader(body), params, nosconst.XML_TYPE) + if err != nil { + return nil, err + } + + resp, err := client.httpClient.Do(request) + if err 
!= nil { + return nil, err + } + defer resp.Body.Close() + client.Log.Debug("resp.StatusCode=", resp.StatusCode) + + if resp.StatusCode == http.StatusOK { + result := &model.DeleteObjectsResult{} + + err := utils.ParseXmlBody(resp.Body, result) + if err != nil { + return nil, err + } + return result, nil + } else { + err := utils.ProcessServerError(resp, deleteRequest.Bucket, "") + return nil, err + } +} + +func (client *NosClient) GetObject(getObjectRequest *model.GetObjectRequest) (*model.NOSObject, error) { + + if getObjectRequest == nil { + return nil, utils.ProcessClientError(noserror.ERROR_CODE_REQUEST_ERROR, "", "", "") + } + + err := utils.VerifyParamsWithObject(getObjectRequest.Bucket, getObjectRequest.Object) + if err != nil { + return nil, err + } + + metadata := &model.ObjectMetadata{ + Metadata: map[string]string{ + nosconst.IfMODIFYSINCE: getObjectRequest.IfModifiedSince, + nosconst.RANGE: getObjectRequest.ObjRange, + }, + } + + request, err := client.getNosRequest("GET", getObjectRequest.Bucket, getObjectRequest.Object, metadata, + nil, nil, nosconst.JSON_TYPE) + if err != nil { + return nil, err + } + + resp, err := client.httpClient.Do(request) + if err != nil { + return nil, err + } + + client.Log.Debug("resp.StatusCode=", resp.StatusCode) + + if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusPartialContent { + nosObject := &model.NOSObject{ + Key: getObjectRequest.Object, + BucketName: getObjectRequest.Bucket, + ObjectMetadata: utils.PopulateAllHeader(resp), + Body: resp.Body, + } + return nosObject, nil + } else if resp.StatusCode == http.StatusNotModified { + return nil, nil + } else { + err := utils.ProcessServerError(resp, getObjectRequest.Bucket, getObjectRequest.Object) + resp.Body.Close() + return nil, err + } +} + +func (client *NosClient) DoesObjectExist(objectRequest *model.ObjectRequest) (bool, error) { + + if objectRequest == nil { + return false, utils.ProcessClientError(noserror.ERROR_CODE_REQUEST_ERROR, "", "", 
"") + } + + err := utils.VerifyParamsWithObject(objectRequest.Bucket, objectRequest.Object) + if err != nil { + return false, err + } + + request, err := client.getNosRequest("HEAD", objectRequest.Bucket, objectRequest.Object, nil, nil, + nil, nosconst.JSON_TYPE) + if err != nil { + return false, err + } + + resp, err := client.httpClient.Do(request) + if err != nil { + return false, err + } + + client.Log.Debug("resp.StatusCode=", resp.StatusCode) + + if resp.StatusCode == http.StatusOK { + return true, nil + } else if resp.StatusCode == http.StatusNotFound { + return false, nil + } else { + err := utils.ProcessServerError(resp, objectRequest.Bucket, objectRequest.Object) + return false, err + } +} + +func (client *NosClient) GetObjectMetaData(objectRequest *model.ObjectRequest) (*model.ObjectMetadata, error) { + + if objectRequest == nil { + return nil, utils.ProcessClientError(noserror.ERROR_CODE_REQUEST_ERROR, "", "", "") + } + + err := utils.VerifyParamsWithObject(objectRequest.Bucket, objectRequest.Object) + if err != nil { + return nil, err + } + + request, err := client.getNosRequest("HEAD", objectRequest.Bucket, objectRequest.Object, nil, + nil, nil, nosconst.JSON_TYPE) + if err != nil { + return nil, err + } + + resp, err := client.httpClient.Do(request) + if err != nil { + return nil, err + } + + client.Log.Debug("resp.StatusCode=", resp.StatusCode) + + if resp.StatusCode == http.StatusOK { + return utils.PopulateAllHeader(resp), nil + } else { + err := utils.ProcessServerError(resp, objectRequest.Bucket, objectRequest.Object) + return nil, err + } +} + +func (client *NosClient) ListObjects(listObjectsRequest *model.ListObjectsRequest) (*model.ListObjectsResult, error) { + + if listObjectsRequest == nil { + return nil, utils.ProcessClientError(noserror.ERROR_CODE_REQUEST_ERROR, "", "", "") + } + + bucket := listObjectsRequest.Bucket + prefix := listObjectsRequest.Prefix + delimiter := listObjectsRequest.Delimiter + marker := listObjectsRequest.Marker + 
maxKeys := listObjectsRequest.MaxKeys + if maxKeys <= 0 { + maxKeys = 100 + } + + err := utils.VerifyParams(bucket) + if err != nil { + return nil, err + } + + params := map[string]string{ + nosconst.LIST_PREFIX: prefix, + nosconst.LIST_DELIMITER: delimiter, + nosconst.LIST_MARKER: marker, + nosconst.LIST_MAXKEYS: strconv.Itoa(maxKeys), + } + + request, err := client.getNosRequest("GET", bucket, "", nil, nil, params, nosconst.XML_TYPE) + if err != nil { + return nil, err + } + + resp, err := client.httpClient.Do(request) + if err != nil { + return nil, err + } + defer resp.Body.Close() + client.Log.Debug("resp.StatusCode=", resp.StatusCode) + + if resp.StatusCode == http.StatusOK { + result := &model.ListObjectsResult{} + err = utils.ParseXmlBody(resp.Body, result) + if err != nil { + return nil, err + } + return result, nil + } else { + err := utils.ProcessServerError(resp, bucket, "") + return nil, err + } +} + +// multipart upload api +func (client *NosClient) InitMultiUpload(initMultiUploadRequest *model.InitMultiUploadRequest) (*model.InitMultiUploadResult, error) { + + if initMultiUploadRequest == nil { + return nil, utils.ProcessClientError(noserror.ERROR_CODE_REQUEST_ERROR, "", "", "") + } + + bucket := initMultiUploadRequest.Bucket + object := initMultiUploadRequest.Object + metadata := initMultiUploadRequest.Metadata + + err := utils.VerifyParamsWithObject(bucket, object) + if err != nil { + return nil, err + } + + params := map[string]string{ + "uploads": "", + } + + request, err := client.getNosRequest("POST", bucket, object, metadata, nil, params, nosconst.XML_TYPE) + if err != nil { + return nil, err + } + + resp, err := client.httpClient.Do(request) + if err != nil { + return nil, err + } + defer resp.Body.Close() + client.Log.Debug("resp.StatusCode=", resp.StatusCode) + + if resp.StatusCode == http.StatusOK { + result := &model.InitMultiUploadResult{} + err = utils.ParseXmlBody(resp.Body, result) + if err != nil { + return nil, err + } + return 
result, nil + } else { + err := utils.ProcessServerError(resp, bucket, object) + return nil, err + } +} + +func (client *NosClient) UploadPart(uploadPartRequest *model.UploadPartRequest) (*model.ObjectResult, error) { + + if uploadPartRequest == nil { + return nil, utils.ProcessClientError(noserror.ERROR_CODE_REQUEST_ERROR, "", "", "") + } + + bucket := uploadPartRequest.Bucket + object := uploadPartRequest.Object + uploadId := uploadPartRequest.UploadId + partNumber := uploadPartRequest.PartNumber + content := uploadPartRequest.Content + partSize := uploadPartRequest.PartSize + contentMd5 := uploadPartRequest.ContentMd5 + + err := utils.VerifyParamsWithObject(bucket, object) + if err != nil { + return nil, err + } + metadata := &model.ObjectMetadata{} + metadata.Metadata = make(map[string]string) + if contentMd5 != "" { + metadata.Metadata[nosconst.CONTENT_MD5] = contentMd5 + } + + params := map[string]string{ + nosconst.UPLOADID: uploadId, + nosconst.PARTNUMBER: strconv.FormatInt(int64(partNumber), 10), + } + limitReader := &io.LimitedReader{ + R: bytes.NewReader(content), + N: partSize, + } + request, err := client.getNosRequest("PUT", bucket, object, metadata, limitReader, + params, nosconst.JSON_TYPE) + if err != nil { + return nil, err + } + + resp, err := client.httpClient.Do(request) + if err != nil { + return nil, err + } + defer resp.Body.Close() + client.Log.Debug("resp.StatusCode=", resp.StatusCode) + + if resp.StatusCode == http.StatusOK { + requestid, etag := utils.PopulateResponseHeader(resp) + objectResult := &model.ObjectResult{ + Etag: etag, + RequestId: requestid, + } + return objectResult, nil + } else { + err := utils.ProcessServerError(resp, bucket, object) + return nil, err + } +} + +func (client *NosClient) CompleteMultiUpload(completeMultiUploadRequest *model.CompleteMultiUploadRequest) ( + *model.CompleteMultiUploadResult, error) { + + if completeMultiUploadRequest == nil { + return nil, 
utils.ProcessClientError(noserror.ERROR_CODE_REQUEST_ERROR, "", "", "") + } + + bucket := completeMultiUploadRequest.Bucket + object := completeMultiUploadRequest.Object + uploadId := completeMultiUploadRequest.UploadId + parts := completeMultiUploadRequest.Parts + contentMd5 := completeMultiUploadRequest.ContentMd5 + objectMd5 := completeMultiUploadRequest.ObjectMd5 + + err := utils.VerifyParamsWithObject(bucket, object) + if err != nil { + return nil, err + } + + params := map[string]string{ + nosconst.UPLOADID: uploadId, + } + + metadata := &model.ObjectMetadata{} + metadata.Metadata = make(map[string]string) + if contentMd5 != "" { + metadata.Metadata[nosconst.CONTENT_MD5] = contentMd5 + } + if objectMd5 != "" { + metadata.Metadata[nosconst.X_NOS_OBJECT_MD5] = objectMd5 + } + + uploadParts := model.UploadParts{Parts: parts} + body, err := xml.Marshal(uploadParts) + if err != nil { + return nil, err + } + + request, err := client.getNosRequest("POST", bucket, object, metadata, bytes.NewReader(body), + params, nosconst.XML_TYPE) + if err != nil { + return nil, err + } + + resp, err := client.httpClient.Do(request) + if err != nil { + return nil, err + } + defer resp.Body.Close() + client.Log.Debug("resp.StatusCode=", resp.StatusCode) + + if resp.StatusCode == http.StatusOK { + result := &model.CompleteMultiUploadResult{} + err = utils.ParseXmlBody(resp.Body, result) + if err != nil { + return nil, err + } + result.Etag = utils.RemoveQuotes(result.Etag) + return result, nil + } else { + err := utils.ProcessServerError(resp, bucket, object) + return nil, err + } +} + +func (client *NosClient) AbortMultiUpload(abortMultiUploadRequest *model.AbortMultiUploadRequest) error { + + if abortMultiUploadRequest == nil { + return utils.ProcessClientError(noserror.ERROR_CODE_REQUEST_ERROR, "", "", "") + } + + bucket := abortMultiUploadRequest.Bucket + object := abortMultiUploadRequest.Object + uploadId := abortMultiUploadRequest.UploadId + + err := 
utils.VerifyParamsWithObject(bucket, object) + if err != nil { + return err + } + + params := map[string]string{ + nosconst.UPLOADID: uploadId, + } + + request, err := client.getNosRequest("DELETE", bucket, object, nil, nil, params, nosconst.JSON_TYPE) + if err != nil { + return err + } + + resp, err := client.httpClient.Do(request) + if err != nil { + return err + } + defer resp.Body.Close() + client.Log.Debug("resp.StatusCode=", resp.StatusCode) + + if resp.StatusCode == http.StatusOK { + return nil + } else { + err := utils.ProcessServerError(resp, bucket, object) + return err + } +} + +func (client *NosClient) ListUploadParts(listUploadPartsRequest *model.ListUploadPartsRequest) (*model.ListPartsResult, error) { + + if listUploadPartsRequest == nil { + return nil, utils.ProcessClientError(noserror.ERROR_CODE_REQUEST_ERROR, "", "", "") + } + + bucket := listUploadPartsRequest.Bucket + object := listUploadPartsRequest.Object + uploadId := listUploadPartsRequest.UploadId + maxParts := listUploadPartsRequest.MaxParts + partNumberMarker := listUploadPartsRequest.PartNumberMarker + + err := utils.VerifyParamsWithObject(bucket, object) + if err != nil { + return nil, err + } + + params := map[string]string{ + nosconst.UPLOADID: uploadId, + nosconst.MAX_PARTS: strconv.Itoa(maxParts), + nosconst.PART_NUMBER_MARKER: strconv.Itoa(partNumberMarker), + } + + request, err := client.getNosRequest("GET", bucket, object, nil, nil, params, nosconst.XML_TYPE) + if err != nil { + return nil, err + } + + resp, err := client.httpClient.Do(request) + if err != nil { + return nil, err + } + defer resp.Body.Close() + client.Log.Debug("resp.StatusCode=", resp.StatusCode) + + if resp.StatusCode == http.StatusOK { + result := &model.ListPartsResult{} + err = utils.ParseXmlBody(resp.Body, result) + if err != nil { + return nil, err + } + + return result, nil + } else { + err := utils.ProcessServerError(resp, bucket, object) + return nil, err + } +} + +// This operation lists in-progress 
multipart uploads. +func (client *NosClient) ListMultiUploads(listMultiUploadsRequest *model.ListMultiUploadsRequest) ( + *model.ListMultiUploadsResult, error) { + + if listMultiUploadsRequest == nil { + return nil, utils.ProcessClientError(noserror.ERROR_CODE_REQUEST_ERROR, "", "", "") + } + + bucket := listMultiUploadsRequest.Bucket + err := utils.VerifyParams(bucket) + if err != nil { + return nil, err + } + + if listMultiUploadsRequest.MaxUploads == 0 { + listMultiUploadsRequest.MaxUploads = nosconst.DEFAULTVALUE + } + + params := map[string]string{ + nosconst.UPLOADS: "", + nosconst.LIST_KEY_MARKER: listMultiUploadsRequest.KeyMarker, + nosconst.LIST_MAX_UPLOADS: strconv.Itoa(listMultiUploadsRequest.MaxUploads), + } + + request, err := client.getNosRequest("GET", bucket, "", nil, nil, params, nosconst.XML_TYPE) + if err != nil { + return nil, err + } + + resp, err := client.httpClient.Do(request) + if err != nil { + return nil, err + } + defer resp.Body.Close() + client.Log.Debug("resp.StatusCode=", resp.StatusCode) + + if resp.StatusCode == http.StatusOK { + result := &model.ListMultiUploadsResult{} + err = utils.ParseXmlBody(resp.Body, result) + if err != nil { + return nil, err + } + + return result, nil + } else { + err := utils.ProcessServerError(resp, bucket, "") + return nil, err + } + +} diff --git a/vendor/github.com/NetEase-Object-Storage/nos-golang-sdk/nosconst/nosconst.go b/vendor/github.com/NetEase-Object-Storage/nos-golang-sdk/nosconst/nosconst.go new file mode 100644 index 00000000..3d444b55 --- /dev/null +++ b/vendor/github.com/NetEase-Object-Storage/nos-golang-sdk/nosconst/nosconst.go @@ -0,0 +1,73 @@ +package nosconst + + +const ( + HZ = iota +) + +type Location int +type Acl int + + +const( + PRIVATE = iota + PUBLICREAD +) + +const ( + DEFAULT_MAXBUFFERSIZE = 1024 * 1024 + MAX_FILESIZE = 100 * 1024 * 1024 + MIN_FILESIZE = 16 * 1024 + MAX_FILENUMBER = 1000 + DEFAULTVALUE = 1000 + MAX_DELETEBODY = 2 * 1024 * 1024 + + RFC1123_NOS = "Mon, 02 Jan 
2006 15:04:05 Asia/Shanghai" + RFC1123_GMT = "Mon, 02 Jan 2006 15:04:05 GMT" + CONTENT_LENGTH = "Content-Length" + CONTENT_TYPE = "Content-Type" + CONTENT_MD5 = "Content-Md5" + LAST_MODIFIED = "Last-Modified" + USER_AGENT = "User-Agent" + DATE = "Date" + AUTHORIZATION = "Authorization" + RANGE = "Range" + IfMODIFYSINCE = "If-Modified-Since" + LIST_PREFIX = "prefix" + LIST_DELIMITER = "delimiter" + LIST_MARKER = "marker" + LIST_MAXKEYS = "max-keys" + UPLOADID = "uploadId" + MAX_PARTS = "max-parts" + PARTNUMBER = "partNumber" + UPLOADS = "uploads" + PART_NUMBER_MARKER = "part-number-marker" + LIST_KEY_MARKER = "key-marker" + LIST_MAX_UPLOADS = "max-uploads" + LIST_UPLOADID_MARKER = "upload-id-marker" + + ETAG = "Etag" + NOS_USER_METADATA_PREFIX = "X-Nos-Meta-" + NOS_ENTITY_TYPE = "X-Nos-Entity-Type" + NOS_VERSION_ID = "X-Nos-Version-Id" + X_NOS_OBJECT_NAME = "X-Nos-Object-Name" + X_NOS_REQUEST_ID = "X-Nos-Request-Id" + X_NOS_OBJECT_MD5 = "X-Nos-Object-Md5" + X_NOS_COPY_SOURCE = "x-nos-copy-source" + X_NOS_MOVE_SOURCE = "x-nos-move-source" + X_NOS_ACL = "x-nos-acl" + + ORIG_CONTENT_MD5 = "Content-MD5" + ORIG_ETAG = "ETag" + ORIG_NOS_USER_METADATA_PREFIX = "x-nos-meta-" + ORIG_NOS_VERSION_ID = "x-nos-version-id" + ORIG_X_NOS_OBJECT_NAME = "x-nos-object-name" + ORIG_X_NOS_REQUEST_ID = "x-nos-request-id" + ORIG_X_NOS_OBJECT_MD5 = "x-nos-Object-md5" + + SDKNAME = "nos-golang-sdk" + VERSION = "1.0.0" + + JSON_TYPE = "json" + XML_TYPE = "xml" +) diff --git a/vendor/github.com/NetEase-Object-Storage/nos-golang-sdk/noserror/noserror.go b/vendor/github.com/NetEase-Object-Storage/nos-golang-sdk/noserror/noserror.go new file mode 100644 index 00000000..30224f59 --- /dev/null +++ b/vendor/github.com/NetEase-Object-Storage/nos-golang-sdk/noserror/noserror.go @@ -0,0 +1,142 @@ +package noserror + +import ( + "encoding/xml" + "strconv" +) + +const ( + BASE_ERROR_CODE = 400 + + /*client error codes*/ + //4xx errors + ERROR_CODE_CFG_ENDPOINT = BASE_ERROR_CODE + 20 + 
ERROR_CODE_CFG_CONNECT_TIMEOUT = BASE_ERROR_CODE + 21 + ERROR_CODE_CFG_READWRITE_TIMEOUT = BASE_ERROR_CODE + 22 + ERROR_CODE_CFG_MAXIDLECONNECT = BASE_ERROR_CODE + 23 + ERROR_CODE_BUCKET_INVALID = BASE_ERROR_CODE + 30 + ERROR_CODE_OBJECT_INVALID = BASE_ERROR_CODE + 31 + ERROR_CODE_FILELENGTH_INVALID = BASE_ERROR_CODE + 32 + ERROR_CODE_FILE_INVALID = BASE_ERROR_CODE + 33 + ERROR_CODE_REQUEST_ERROR = BASE_ERROR_CODE + 34 + ERROR_CODE_READCONTENT_ERROR = BASE_ERROR_CODE + 35 + ERROR_CODE_PARSEJSON_ERROR = BASE_ERROR_CODE + 36 + ERROR_CODE_PARSEXML_ERROR = BASE_ERROR_CODE + 37 + ERROR_CODE_SRCBUCKETANDOBJECT_ERROR = BASE_ERROR_CODE + 38 + ERROR_CODE_DELETEMULTIOBJECTS_ERROR = BASE_ERROR_CODE + 39 + ERROR_CODE_OBJECTSBIGGER_ERROR = BASE_ERROR_CODE + 40 + ERROR_CODE_PARTLENGTH_ERROR = BASE_ERROR_CODE + 41 + + /*short message code*/ + ERROR_MSG_CFG_ENDPOINT = "Config: InvalidEndpoint" + ERROR_MSG_CFG_CONNECT_TIMEOUT = "Config: InvalidConnectionTimeout" + ERROR_MSG_CFG_READWRITE_TIMEOUT = "Config: InvalidReadWriteTimeout" + ERROR_MSG_CFG_MAXIDLECONNECT = "Config: InvalidMaxIdleConnect" + ERROR_MSG_BUCKET_INVALID = "InvalidBucketName" + ERROR_MSG_OBJECT_INVALID = "InvalidObjectName" + ERROR_MSG_FILELENGTH_INVALID = "InvalidFileSize" + ERROR_MSG_FILE_INVALID = "Failed to open file. 
" + ERROR_MSG_REQUEST_ERROR = "Request is nil" + ERROR_MSG_READCONTENT_ERROR = "ReadContentError" + ERROR_MSG_PARSEJSON_ERROR = "InvalidJSONContent" + ERROR_MSG_PARSEXML_ERROR = "InvalidXmlContent" + ERROR_MSG_SRCBUCKETANDOBJECT_ERROR = "SrcBucket or SrcObject is invalid" + ERROR_MSG_DELETEMULTIOBJECTS_ERROR = "InvalidDeleteMultiObjects" + ERROR_MSG_OBJECTSBIGGER_ERROR = "InvalidObjects: the number is < 1000 and size of body is < 2M" + ERROR_MSG_PARTLENGTH_ERROR = "InvalidPartLength: the length should be between 16k and 100M" +) + +// mErrHttpCodeMap is map of Http Code +var mErrMsgMap map[int]string + +func Init() { + mErrMsgMap = make(map[int]string) + + //init err msg map + mErrMsgMap[ERROR_CODE_CFG_ENDPOINT] = ERROR_MSG_CFG_ENDPOINT + mErrMsgMap[ERROR_CODE_CFG_CONNECT_TIMEOUT] = ERROR_MSG_CFG_CONNECT_TIMEOUT + mErrMsgMap[ERROR_CODE_CFG_READWRITE_TIMEOUT] = ERROR_MSG_CFG_READWRITE_TIMEOUT + mErrMsgMap[ERROR_CODE_CFG_MAXIDLECONNECT] = ERROR_MSG_CFG_MAXIDLECONNECT + mErrMsgMap[ERROR_CODE_BUCKET_INVALID] = ERROR_MSG_BUCKET_INVALID + mErrMsgMap[ERROR_CODE_OBJECT_INVALID] = ERROR_MSG_OBJECT_INVALID + mErrMsgMap[ERROR_CODE_FILELENGTH_INVALID] = ERROR_MSG_FILELENGTH_INVALID + mErrMsgMap[ERROR_CODE_FILE_INVALID] = ERROR_MSG_FILE_INVALID + mErrMsgMap[ERROR_CODE_REQUEST_ERROR] = ERROR_MSG_REQUEST_ERROR + mErrMsgMap[ERROR_CODE_READCONTENT_ERROR] = ERROR_MSG_READCONTENT_ERROR + mErrMsgMap[ERROR_CODE_PARSEJSON_ERROR] = ERROR_MSG_PARSEJSON_ERROR + mErrMsgMap[ERROR_CODE_PARSEXML_ERROR] = ERROR_MSG_PARSEXML_ERROR + mErrMsgMap[ERROR_CODE_SRCBUCKETANDOBJECT_ERROR] = ERROR_MSG_SRCBUCKETANDOBJECT_ERROR + mErrMsgMap[ERROR_CODE_DELETEMULTIOBJECTS_ERROR] = ERROR_MSG_DELETEMULTIOBJECTS_ERROR + mErrMsgMap[ERROR_CODE_OBJECTSBIGGER_ERROR] = ERROR_MSG_OBJECTSBIGGER_ERROR + mErrMsgMap[ERROR_CODE_PARTLENGTH_ERROR] = ERROR_MSG_PARTLENGTH_ERROR +} + +type NosError struct { + XMLName xml.Name `xml:"Error" json:"-"` + Code string `xml:"Code" json:"Code"` + Message string `xml:"Message" 
json:"Message"` + Resource string `xml:"Resource" json:"Resource"` + NosRequestId string `xml:"RequestId" json:"RequestId"` +} + +func NewNosError(code string, message string, resource string, requestid string) *NosError { + nosError := &(NosError{ + Code: code, + Message: message, + Resource: resource, + NosRequestId: requestid, + }) + return nosError +} + +func (nosError *NosError) Error() string { + return "Code = " + nosError.Code + + ", Message = " + nosError.Message + + ", Resource = " + nosError.Resource + + ", NosRequestId = " + nosError.NosRequestId +} + +type ServerError struct { + StatusCode int + RequestId string + NosErr *NosError `json:"Error"` +} + +func NewServerError(errCode int, requestid string, nosErr *NosError) error { + serverError := &(ServerError{ + StatusCode: errCode, + RequestId: requestid, + NosErr: nosErr, + }) + return serverError +} + +func (serverError *ServerError) Error() string { + return "StatusCode = " + strconv.Itoa(serverError.StatusCode) + + ", RequestId = " + serverError.RequestId + + ", NosError: " + serverError.NosErr.Error() +} + +type ClientError struct { + StatusCode int + Resource string + Message string +} + +func NewClientError(errCode int, resource string, msg string) error { + clientError := &(ClientError{ + StatusCode: errCode, + Resource: resource, + Message: mErrMsgMap[errCode], + }) + if msg != ""{ + clientError.Message += ": " + msg + } + return clientError +} + +func (clientError *ClientError) Error() string { + return "StatusCode = " + strconv.Itoa(clientError.StatusCode) + + ", Resource = " + clientError.Resource + + ", Message = " + clientError.Message +} diff --git a/vendor/github.com/NetEase-Object-Storage/nos-golang-sdk/utils/utils.go b/vendor/github.com/NetEase-Object-Storage/nos-golang-sdk/utils/utils.go new file mode 100644 index 00000000..128a5ea5 --- /dev/null +++ b/vendor/github.com/NetEase-Object-Storage/nos-golang-sdk/utils/utils.go @@ -0,0 +1,225 @@ +package utils + +import ( + "encoding/json" 
+ "encoding/xml" + "github.com/NetEase-Object-Storage/nos-golang-sdk/model" + "github.com/NetEase-Object-Storage/nos-golang-sdk/nosconst" + "github.com/NetEase-Object-Storage/nos-golang-sdk/noserror" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "unicode" + "runtime" +) + +// VerifyObjectName check if the BucketName is legal +func VerifyBucketName(bucketName string) bool { + if bucketName == "" { + return false + } + + length := len(bucketName) + if length < 3 || length > 63 { + return false + } + + if bucketName != strings.ToLower(bucketName) { + return false + } + + if strings.Contains(bucketName, ".") { + return false + } + + specialCharactersCursor := false + for i := 0; i != length; i++ { + ch := bucketName[i] + if !unicode.IsLetter(rune(ch)) && !unicode.IsDigit(rune(ch)) { + //no start or end with special characters + if i == 0 || i == (length-1) { + return false + } + //no Continuous two speical characters + if specialCharactersCursor { + return false + } + specialCharactersCursor = true + if ch != '-' { + return false + } + } else { + specialCharactersCursor = false + } + } + + return true +} + +// VerifyObjectName check if the object name is legal +func VerifyObjectName(object string) bool { + if object == "" { + return false + } + if len(object) > 1000 { + return false + } + return true +} + +func VerifyParams(bucket string) error { + if !VerifyBucketName(bucket) { + return ProcessClientError(noserror.ERROR_CODE_BUCKET_INVALID, bucket, "", "") + } + + return nil +} + +func VerifyParamsWithObject(bucket string, object string) error { + + err := VerifyParams(bucket) + if err != nil { + return err + } + + if !VerifyObjectName(object) { + return ProcessClientError(noserror.ERROR_CODE_OBJECT_INVALID, bucket, object, "") + } + + return nil +} + +func VerifyParamsWithLength(bucket string, object string, length int64) error { + + err := VerifyParamsWithObject(bucket, object) + if err != nil { + return err + } + + if length > 
nosconst.MAX_FILESIZE { + return ProcessClientError(noserror.ERROR_CODE_FILELENGTH_INVALID, bucket, object, "") + } + + return nil +} + +func ParseXmlBody(body io.Reader, value interface{}) error { + content, err := ioutil.ReadAll(body) + if err != nil { + return err + } + err = xml.Unmarshal(content, value) + if err != nil { + return err + } + + return nil +} + +func RemoveQuotes(orig string) string { + s := strings.TrimSpace(orig) + + if strings.HasPrefix(s, "\"") { + s = s[1:len(s)] + } + + if strings.HasSuffix(s, "\"") { + s = s[0 : len(s)-1] + } + + return s +} + +func PopulateResponseHeader(response *http.Response) (requestid, etag string) { + hdr := response.Header + + etag = RemoveQuotes(hdr.Get(nosconst.ETAG)) + requestid = hdr.Get(nosconst.X_NOS_REQUEST_ID) + + return requestid, etag +} + +func PopulateAllHeader(response *http.Response) *model.ObjectMetadata { + hdr := response.Header + result := &model.ObjectMetadata{ + Metadata: map[string]string{}, + } + + for key, value := range hdr { + if value != nil { + if strings.EqualFold(key, nosconst.CONTENT_LENGTH) { + result.ContentLength, _ = strconv.ParseInt(value[0], 10, 64) + } else if strings.EqualFold(key, nosconst.ETAG) { + result.Metadata[nosconst.ETAG] = RemoveQuotes(value[0]) + } else { + result.Metadata[key] = value[0] + } + } + } + + return result +} + +func ProcessClientError(statCode int, bucket, object string, msg string) error { + var resource string + if bucket != "" { + resource += "/" + bucket + } + if object != "" { + resource += "/" + object + } + clientError := noserror.NewClientError(statCode, resource, msg) + + return clientError +} + +func ProcessServerError(response *http.Response, bucketName, objectName string) error { + var nosErr *noserror.NosError + + resource := bucketName + "/" + objectName + requestId := response.Header.Get(nosconst.X_NOS_REQUEST_ID) + contenttype := response.Header.Get(nosconst.CONTENT_TYPE) + + serverError := &noserror.ServerError{ + StatusCode: 
response.StatusCode, + RequestId: requestId, + } + + content, err := ioutil.ReadAll(response.Body) + if err != nil { + nosErr = noserror.NewNosError("", noserror.ERROR_MSG_READCONTENT_ERROR, resource, requestId) + serverError.NosErr = nosErr + } else { + if strings.Contains(contenttype, nosconst.JSON_TYPE) { + if err = json.Unmarshal(content, &serverError); err != nil { + nosErr = noserror.NewNosError("", noserror.ERROR_MSG_PARSEJSON_ERROR, resource, requestId) + serverError.NosErr = nosErr + } + } else { + if err = xml.Unmarshal(content, &nosErr); err != nil { + nosErr = noserror.NewNosError("", noserror.ERROR_MSG_PARSEXML_ERROR, resource, requestId) + } + serverError.NosErr = nosErr + } + } + + return serverError +} + +func NosUrlEncode(origin string) string { + str := strings.Replace(url.QueryEscape(origin), "+", "%20", -1) + str = strings.Replace(str, "~", "%7E", -1) + str = strings.Replace(str, "%2A", "*", -1) + return str +} + +func InitUserAgent() string { + str := nosconst.SDKNAME + "/" + nosconst.VERSION + " " + str += runtime.GOOS + "/" + runtime.GOARCH + "/" + str += "golang version:" + runtime.Version() + + return str +} \ No newline at end of file